Python schema.Index Class Code Examples


This article collects typical usage examples of the Python class sqlalchemy.schema.Index. If you have been wondering what the Index class is for, how to use it, or what real code that uses it looks like, the curated class examples below should help.



The following presents 20 code examples of the Index class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
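
Before the examples, here is a minimal, self-contained sketch of the two operations that recur throughout them: declaring an Index over a table's columns and emitting CREATE INDEX / DROP INDEX against an engine. The engine URL, table, and column names below are invented for illustration.

from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine
from sqlalchemy.schema import Index

# In-memory SQLite engine, used purely for illustration.
engine = create_engine("sqlite:///:memory:")
metadata = MetaData()

# A hypothetical table with a column worth indexing.
users = Table(
    "users", metadata,
    Column("id", Integer, primary_key=True),
    Column("email", String(255)),
)
metadata.create_all(engine)

# Name the index explicitly, then emit the DDL against the engine.
email_idx = Index("ix_users_email", users.c.email)
email_idx.create(engine)   # CREATE INDEX ix_users_email ON users (email)
email_idx.drop(engine)     # DROP INDEX ix_users_email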

Example 1: downgrade

def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    service_statuses = Table('service_statuses', meta, autoload=True)
    idx = Index("service_statuses_instance_id", service_statuses.c.instance_id)
    idx.drop()
Author: AlexeyDeyneko, Project: trove, Lines: 7, Source: 025_add_service_statuses_indexes.py
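
Example 1 calls idx.drop() without passing an engine; that works because the sqlalchemy-migrate script binds the MetaData via meta.bind, a pattern that was removed in SQLAlchemy 2.0. A sketch of the same drop in plain, current SQLAlchemy, with the bind passed explicitly (the SQLite URL is a stand-in for the real migrate_engine):

from sqlalchemy import MetaData, Table, create_engine
from sqlalchemy.schema import Index

engine = create_engine("sqlite:///trove.db")  # stand-in for migrate_engine
meta = MetaData()

# Reflect the existing table, then drop its index by name.
service_statuses = Table("service_statuses", meta, autoload_with=engine)
idx = Index("service_statuses_instance_id", service_statuses.c.instance_id)
idx.drop(engine)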


Example 2: create_index

    def create_index(self, columns, name=None):
        """
        Create an index to speed up queries on a table. If no ``name`` is given a random name is created.
        ::

            table.create_index(['name', 'country'])
        """
        self._check_dropped()
        if not name:
            sig = '||'.join(columns)

            # This is a work-around for a bug in <=0.6.1 which would create
            # indexes based on hash() rather than a proper hash.
            key = abs(hash(sig))
            name = 'ix_%s_%s' % (self.table.name, key)
            if name in self.indexes:
                return self.indexes[name]

            key = sha1(sig.encode('utf-8')).hexdigest()[:16]
            name = 'ix_%s_%s' % (self.table.name, key)

        if name in self.indexes:
            return self.indexes[name]
        try:
            self.database._acquire()
            columns = [self.table.c[c] for c in columns]
            idx = Index(name, *columns)
            idx.create(self.database.engine)
        except:
            idx = None
        finally:
            self.database._release()
        self.indexes[name] = idx
        return idx
Author: reubano, Project: dataset, Lines: 34, Source: table.py


Example 3: create_index

    def create_index(self, columns, name=None, index_type="btree"):
        """
        Create an index to speed up queries on a table.
        If no ``name`` is given a random name is created.
        ::
            table.create_index(['name', 'country'])
        """
        self._check_dropped()
        if not name:
            sig = "||".join(columns + [index_type])
            # This is a work-around for a bug in <=0.6.1 which would create
            # indexes based on hash() rather than a proper hash.
            key = abs(hash(sig))
            name = "ix_%s_%s" % (self.table.name, key)
            if name in self.indexes:
                return self.indexes[name]
            key = sha1(sig.encode("utf-8")).hexdigest()[:16]
            name = "ix_%s_%s" % (self.table.name, key)
        if name in self.indexes:
            return self.indexes[name]
        # self.db._acquire()
        columns = [self.table.c[col] for col in columns]
        idx = Index(name, *columns, postgresql_using=index_type)
        idx.create(self.engine)
        # finally:
        #    self.db._release()
        self.indexes[name] = idx
        return idx
Author: smnorris, Project: pgdb, Lines: 28, Source: table.py
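
Example 3 differs from Example 2 mainly by forwarding index_type to the dialect-specific postgresql_using keyword, which selects the PostgreSQL index access method (btree, hash, gin, gist, and so on). A sketch of that keyword in isolation, assuming a PostgreSQL engine and a hypothetical documents table with a JSONB column:

from sqlalchemy import Column, Integer, MetaData, Table, create_engine
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.schema import Index

# Hypothetical connection string, for illustration only.
engine = create_engine("postgresql+psycopg2://user:secret@localhost/demo")
metadata = MetaData()
documents = Table(
    "documents", metadata,
    Column("id", Integer, primary_key=True),
    Column("payload", JSONB),
)

# postgresql_using chooses the index access method; other dialects ignore it.
payload_idx = Index("ix_documents_payload", documents.c.payload, postgresql_using="gin")
metadata.create_all(engine)  # emits CREATE INDEX ... USING gin (payload)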


Example 4: upgrade

def upgrade(migrate_engine):
    LOG.info(_LI("Adding boolean column delayed_notify to table 'zones'"))
    meta.bind = migrate_engine
    zones_table = Table('zones', meta, autoload=True)
    col = Column('delayed_notify', Boolean(), default=False)
    col.create(zones_table)
    index = Index('delayed_notify', zones_table.c.delayed_notify)
    index.create(migrate_engine)
Author: ISCAS-VDI, Project: designate-base, Lines: 8, Source: 084_add_delayed_notify_column.py


Example 5: generate_key_index

    def generate_key_index(self):
        for index in self.key.table.indexes:
            if len(index.columns) == 1:
                for col in index.columns:
                    if col == self.key:
                        return
        index = Index(self.index_name, self.key)
        index.create(self.config.engine)
Author: backgroundcheck, Project: linkage, Lines: 8, Source: model.py


Example 6: upgrade

def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    service_statuses = Table('service_statuses', meta, autoload=True)
    idx = Index("service_statuses_instance_id", service_statuses.c.instance_id)

    try:
        idx.create()
    except OperationalError as e:
        logger.info(e)
Author: Tesora, Project: tesora-trove, Lines: 11, Source: 025_add_service_statuses_indexes.py


Example 7: upgrade

def upgrade(migrate_engine):
    meta.bind = migrate_engine
    Table('datastores', meta, autoload=True)
    Table('datastore_versions', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)
    create_tables([clusters])
    instances.create_column(Column('cluster_id', String(36),
                                   ForeignKey("clusters.id")))
    instances.create_column(Column('shard_id', String(36)))
    instances.create_column(Column('type', String(64)))
    cluster_id_idx = Index("instances_cluster_id", instances.c.cluster_id)
    cluster_id_idx.create()
Author: Hopebaytech, Project: trove, Lines: 12, Source: 032_clusters.py


Example 8: generate_key_index

    def generate_key_index(self):
        key = self.key
        table = key.table
        if isinstance(table, Alias):
            table = table.original
            key = table.c[key.name]

        for index in table.indexes:
            if len(index.columns) == 1:
                for col in index.columns:
                    if col == key:
                        return
        index = Index(self.index_name, key)
        index.create(self.config.engine)
Author: pudo, Project: linkage, Lines: 14, Source: model.py
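
Example 8 walks table.indexes, the collection of Index objects attached to the Table in Python. Another way to find out which indexes already exist in the database itself is the runtime inspector; a sketch with a hypothetical database, table, and column name:

from sqlalchemy import create_engine, inspect

engine = create_engine("sqlite:///demo.db")  # hypothetical database

# get_indexes() reports the indexes present in the database,
# not just those attached to the in-memory MetaData.
existing = inspect(engine).get_indexes("instances")
already_indexed = any(ix["column_names"] == ["cluster_id"] for ix in existing)
print(already_indexed)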


Example 9: create_index

def create_index(engine, table, columns, name=None):
    with lock:
        if not name:
            sig = abs(hash('||'.join(columns)))
            name = 'ix_%s_%s' % (table.name, sig)
        if name in INDEXES:
            return INDEXES[name]
        try:
            columns = [table.c[c] for c in columns]
            idx = Index(name, *columns)
            idx.create(engine)
        except:
            idx = None
        INDEXES[name] = idx
        return idx
Author: rossjones, Project: sqlaload, Lines: 15, Source: schema.py


Example 10: create_index

def create_index(engine, table, columns, name=None):
    table = get_table(engine, table)
    with lock:
        if not name:
            sig = abs(hash("||".join(columns)))
            name = "ix_%s_%s" % (table.name, sig)
        if name in engine._indexes:
            return engine._indexes[name]
        try:
            columns = [table.c[c] for c in columns]
            idx = Index(name, *columns)
            idx.create(engine)
        except:
            idx = None
        engine._indexes[name] = idx
        return idx
Author: jljs, Project: sqlaload, Lines: 16, Source: schema.py


Example 11: upgrade

def upgrade(migrate_engine):
    meta.bind = migrate_engine
    Table('datastores', meta, autoload=True)
    Table('datastore_versions', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)

    # since the downgrade is a no-op, an upgrade after a downgrade will
    # cause an exception because the tables already exist
    # we will catch that case and log an info message
    try:
        create_tables([clusters])

        instances.create_column(Column('cluster_id', String(36),
                                       ForeignKey("clusters.id")))
        instances.create_column(Column('shard_id', String(36)))
        instances.create_column(Column('type', String(64)))

        cluster_id_idx = Index("instances_cluster_id", instances.c.cluster_id)
        cluster_id_idx.create()
    except OperationalError as e:
        logger.info(e)
Author: rumale, Project: trove, Lines: 21, Source: 032_clusters.py


Example 12: create_index

    def create_index(self, columns, name=None):
        """
        Create an index to speed up queries on a table. If no ``name`` is given a random name is created.
        ::

            table.create_index(['name', 'country'])
        """
        self._check_dropped()
        with self.database.lock:
            if not name:
                sig = abs(hash('||'.join(columns)))
                name = 'ix_%s_%s' % (self.table.name, sig)
            if name in self.indexes:
                return self.indexes[name]
            try:
                columns = [self.table.c[c] for c in columns]
                idx = Index(name, *columns)
                idx.create(self.database.engine)
            except:
                idx = None
            self.indexes[name] = idx
            return idx
Author: aklaver, Project: dataset, Lines: 22, Source: table.py


Example 13: visit_column

    def visit_column(self, column):
        """Create a column (table already exists).

        :param column: column object
        :type column: :class:`sqlalchemy.Column` instance
        """
        if column.default is not None:
            self.traverse_single(column.default)

        table = self.start_alter_table(column)
        self.append("ADD ")
        self.append(self.get_column_specification(column))

        for cons in column.constraints:
            self.traverse_single(cons)
        self.execute()

        # ALTER TABLE STATEMENTS

        # add indexes and unique constraints
        if column.index_name:
            ix = Index(column.index_name,
                       column,
                       unique=bool(column.index_name or column.index))
            ix.create()
        elif column.unique_name:
            constraint.UniqueConstraint(column,
                                        name=column.unique_name).create()

        # SA bounds FK constraints to table, add manually
        for fk in column.foreign_keys:
            self.add_foreignkey(fk.constraint)

        # add primary key constraint if needed
        if column.primary_key_name:
            cons = constraint.PrimaryKeyConstraint(column,
                                                   name=column.primary_key_name)
            cons.create()
Author: hgroll, Project: yocto-autobuilder, Lines: 38, Source: ansisql.py
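
Example 13 comes from sqlalchemy-migrate's ANSI DDL visitor and shows Index being constructed with the unique flag. Outside that framework, the same effect can be sketched with plain SQLAlchemy (the table and column names are invented):

from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine
from sqlalchemy.schema import Index

engine = create_engine("sqlite:///:memory:")
metadata = MetaData()
accounts = Table(
    "accounts", metadata,
    Column("id", Integer, primary_key=True),
    Column("username", String(64)),
)
metadata.create_all(engine)

# unique=True makes the DDL a CREATE UNIQUE INDEX instead of CREATE INDEX.
Index("uq_accounts_username", accounts.c.username, unique=True).create(engine)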


Example 14: downgrade

def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)

    tenant_id_idx = Index("instances_tenant_id", instances.c.tenant_id)
    tenant_id_idx.drop()

    deleted_idx = Index("instances_deleted", instances.c.deleted)
    deleted_idx.drop()
Author: cretta, Project: trove, Lines: 11, Source: 023_add_instance_indexes.py


Example 15: downgrade

def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    backups = Table('backups', meta, autoload=True)
    backups_instance_id_idx = Index("backups_instance_id",
                                    backups.c.instance_id)
    backups_deleted_idx = Index("backups_deleted", backups.c.deleted)

    meta.bind = migrate_engine
    backups_instance_id_idx.drop()
    backups_deleted_idx.drop()
Author: AlexeyDeyneko, Project: trove, Lines: 12, Source: 024_add_backup_indexes.py


Example 16: upgrade

def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)

    tenant_id_idx = Index("instances_tenant_id", instances.c.tenant_id)

    try:
        tenant_id_idx.create()
    except OperationalError as e:
        logger.info(e)

    deleted_idx = Index("instances_deleted", instances.c.deleted)
    try:
        deleted_idx.create()
    except OperationalError as e:
        logger.info(e)
Author: cretta, Project: trove, Lines: 18, Source: 023_add_instance_indexes.py


Example 17: upgrade

def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    backups = Table('backups', meta, autoload=True)
    backups_instance_id_idx = Index("backups_instance_id",
                                    backups.c.instance_id)
    backups_deleted_idx = Index("backups_deleted", backups.c.deleted)

    try:
        backups_instance_id_idx.create()
    except OperationalError as e:
        logger.info(e)

    try:
        backups_deleted_idx.create()
    except OperationalError as e:
        logger.info(e)
Author: AlexeyDeyneko, Project: trove, Lines: 18, Source: 024_add_backup_indexes.py


Example 18: create_cube_aggregate

    def create_cube_aggregate(self, browser, table_name=None, dimensions=None,
                              dimension_links=None, schema=None,
                              replace=False):
        """Creates an aggregate table. If dimensions is `None` then all cube's
        dimensions are considered.

        Arguments:

        * `dimensions`: list of dimensions to use in the aggregated cuboid, if
          `None` then all cube dimensions are used
        * `dimension_links`: list of dimensions that are required for each
          aggregation (for example a date dimension in most of the cases). The
          list should be a subset of `dimensions`.
        * `aggregates_prefix`: aggregated table prefix
        * `aggregates_schema`: schema where aggregates are stored

        """

        if browser.store != self:
            raise ArgumentError("Can create aggregate table only within "
                                "the same store")

        schema = schema or self.options.get("aggregates_schema", self.schema)
        # Just a shortcut
        cube = browser.cube

        prefix = self.options.get("aggregates_prefix", "")
        table_name = table_name or "%s_%s" % (prefix, cube.name)

        if dimensions:
            dimensions = [cube.dimension(dimension) for dimension in dimensions]
        else:
            dimensions = cube.dimensions

        builder = QueryBuilder(browser)

        if builder.snowflake.fact_name == table_name and builder.snowflake.schema == schema:
            raise ArgumentError("target is the same as source fact table")

        drilldown = []
        keys = None
        for dimension in dimensions:
            levels = dimension.hierarchy().levels
            drilldown.append((dimension, dimension.hierarchy(), levels[-1]))
            keys = [l.key for l in levels]

        cell = Cell(cube)
        drilldown = Drilldown(drilldown, cell)

        # Create statement of all dimension level keys for
        # getting structure for table creation
        statement = builder.aggregation_statement(
            cell,
            drilldown=drilldown,
            aggregates=cube.aggregates,
            attributes=keys
        )

        # Create table
        table = self.create_table_from_statement(
            table_name,
            statement,
            schema=schema,
            replace=replace,
            insert=False
        )

        self.logger.info("Inserting...")

        with self.connectable.begin() as connection:

            insert = InsertIntoAsSelect(table, statement,
                                        columns=statement.columns)

            connection.execute(insert)
        self.logger.info("Done")

        self.logger.info("Creating indexes...")

        aggregated_columns = [a.name for a in cube.aggregates]
        for column in table.columns:
            if column.name in aggregated_columns:
                continue

            name = "%s_%s_idx" % (table_name, column)
            self.logger.info("creating index: %s" % name)
            index = Index(name, column)
            index.create(self.connectable)

        self.logger.info("Done")
Author: dustinromey, Project: cubes, Lines: 90, Source: store.py


Example 19: drop_index

def drop_index(index):
    if index_exists(index):
        index = Index(*index)
        index.drop()
Author: ISCAS-VDI, Project: designate-base, Lines: 4, Source: 080_domain_to_zone_rename.py


Example 20: create_cube_aggregate

    def create_cube_aggregate(self, cube, table_name=None, dimensions=None,
                                 replace=False, create_index=False,
                                 schema=None):
        """Creates an aggregate table. If dimensions is `None` then all cube's
        dimensions are considered.

        Arguments:

        * `dimensions`: list of dimensions to use in the aggregated cuboid, if
          `None` then all cube dimensions are used
        """

        browser = SQLBrowser(cube, self, schema=schema)

        if browser.safe_labels:
            raise ConfigurationError("Aggregation does not work with "
                                     "safe_labels turned on")

        schema = schema or self.naming.aggregate_schema \
                    or self.naming.schema

        # TODO: this is very similar to the denormalization prep.
        table_name = table_name or self.naming.aggregate_table_name(cube.name)
        fact_name = cube.fact or self.naming.fact_table_name(cube.name)

        dimensions = dimensions or [dim.name for dim in cube.dimensions]

        if fact_name == table_name and schema == self.naming.schema:
            raise StoreError("Aggregation target is the same as fact")

        drilldown = []
        keys = []
        for dimref in dimensions:
            (dimname, hiername, level) = string_to_dimension_level(dimref)
            dimension = cube.dimension(dimname)
            hierarchy = dimension.hierarchy(hiername)
            levels = hierarchy.levels
            drilldown.append((dimension, hierarchy, levels[-1]))
            keys += [l.key for l in levels]

        cell = Cell(cube)
        drilldown = Drilldown(drilldown, cell)

        # Create statement of all dimension level keys for
        # getting structure for table creation
        (statement, _) = browser.aggregation_statement(
            cell,
            drilldown=drilldown,
            aggregates=cube.aggregates
        )

        # Create table
        table = self.create_table_from_statement(
            table_name,
            statement,
            schema=schema,
            replace=replace,
            insert=False
        )

        self.logger.info("Inserting...")

        insert = table.insert().from_select(statement.columns, statement)
        self.execute(insert)

        self.logger.info("Done")

        if create_index:
            self.logger.info("Creating indexes...")
            aggregated_columns = [a.name for a in cube.aggregates]
            for column in table.columns:
                if column.name in aggregated_columns:
                    continue

                name = "%s_%s_idx" % (table_name, column)
                self.logger.info("creating index: %s" % name)
                index = Index(name, column)
                index.create(self.connectable)

        self.logger.info("Done")
Author: NoemiNahomy, Project: cubes, Lines: 80, Source: store.py



Note: The sqlalchemy.schema.Index class examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult the corresponding project's license before redistributing or using the code; do not reproduce without permission.

