
Python expression.bindparam Function Code Examples


This article collects typical usage examples of the sqlalchemy.sql.expression.bindparam function in Python. If you are wondering how exactly to use bindparam, what it is for, or what real-world calls to it look like, the curated examples below should help.



The following shows 20 code examples of the bindparam function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
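
Before the collected examples, here is a minimal, self-contained sketch of the core pattern most of them rely on: build a statement once with bindparam() placeholders, then execute it repeatedly with different parameter dictionaries ("executemany"). The in-memory SQLite engine, the users table, and the parameter names below are hypothetical and chosen only for illustration.

from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine
from sqlalchemy.sql.expression import bindparam

# Throwaway in-memory database and table, purely for illustration.
engine = create_engine('sqlite://')
metadata = MetaData()
users = Table('users', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String(50)))
metadata.create_all(engine)

# Build the UPDATE once; bindparam() leaves named placeholders in the statement.
update_stmt = users.update().\
    where(users.c.id == bindparam('user_id')).\
    values(name=bindparam('new_name'))

with engine.begin() as conn:
    conn.execute(users.insert(), [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}])
    # Execute the prepared statement once per parameter dictionary.
    conn.execute(update_stmt, [{'user_id': 1, 'new_name': 'alice'},
                               {'user_id': 2, 'new_name': 'bob'}])

Every example below is a variation on this idea: the statement is built once with bindparam placeholders and then driven with per-row parameters, whether it is an INSERT, an UPDATE, a SELECT, or a textual query built with text().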

Example 1: add_father_ids

def add_father_ids(engine):
    ct = table_scheme.categories_t
    connection = engine.connect()

    #prepared statements
    selection = ct.select().where(ct.c.Topic==bindparam('f_topic'))
    fid_update = ct.update().where(ct.c.catid==bindparam('child_id')).values(fatherid=bindparam('fatherid_'))
    all_categories = connection.execute('SELECT * FROM categories')

    counter = 0
    sys.stdout.write('\n')
    for row in all_categories:
        counter += 1
        topic = row.Topic
        title = row.Title
        catid = row.catid
        if catid < 3: #ignore "" and "Top"
            continue

        index = len(topic)-(len(title)+1)
        father_topic = topic[:index]

        father_selection = connection.execute(selection, f_topic=father_topic)
        father = father_selection.first()
        if father is None:
            LOG.debug('Found no father for "{0}", searched for "{1}"'.format(topic, father_topic))
            continue
        father_id = father[ct.c.catid]
        connection.execute(fid_update, child_id=catid, fatherid_=father_id)
        if counter % 10000 == 0:
            sys.stdout.write('.')
            if counter % 200000 == 0:
                sys.stdout.write(' - {0} ids generated\n'.format(counter))
            sys.stdout.flush()
    print()
Developer: AbhishekGhosh, Project: dmoz2db, Lines: 35, Source file: dmoz2db.py


Example 2: list_expired_dids

def list_expired_dids(worker_number=None, total_workers=None, limit=None, session=None):
    """
    List expired data identifiers.

    :param limit: limit number.
    :param session: The database session in use.
    """
    query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\
        filter(models.DataIdentifier.expired_at < datetime.utcnow()).\
        order_by(models.DataIdentifier.expired_at).\
        with_hint(models.DataIdentifier, "index(DIDS DIDS_EXPIRED_AT_IDX)", 'oracle')

    if worker_number and total_workers and total_workers - 1 > 0:
        if session.bind.dialect.name == 'oracle':
            bindparams = [bindparam('worker_number', worker_number-1), bindparam('total_workers', total_workers-1)]
            query = query.filter(text('ORA_HASH(name, :total_workers) = :worker_number', bindparams=bindparams))
        elif session.bind.dialect.name == 'mysql':
            query = query.filter('mod(md5(name), %s) = %s' % (total_workers - 1, worker_number - 1))
        elif session.bind.dialect.name == 'postgresql':
            query = query.filter('mod(abs((\'x\'||md5(name))::bit(32)::int), %s) = %s' % (total_workers-1, worker_number-1))
        elif session.bind.dialect.name == 'sqlite':
            row_count = 0
            dids = list()
            for scope, name, did_type in query.yield_per(10):
                if int(md5(name).hexdigest(), 16) % total_workers == worker_number-1:
                    dids.append({'scope': scope, 'name': name, 'did_type': did_type})
                    row_count += 1
                if limit and row_count >= limit:
                    return dids
            return dids

    if limit:
        query = query.limit(limit)

    return [{'scope': scope, 'name': name, 'did_type': did_type} for scope, name, did_type in query]
Developer: pombredanne, Project: rucio, Lines: 35, Source file: did.py


Example 3: update_item_saved_info

def update_item_saved_info(item):
    engine = get_onitsuka_db_engine()
    
    item_owner_id = item['owner_id']
    item_id = item['item_id']
    
    user_following = Table('user_following', metaData, autoload=True, autoload_with = engine)
    s = select([user_following.c.user_id], (user_following.c.following_id==item_owner_id))
    
    result = engine.execute(s)
    
    user_feed_update_list = list()
    for follower in result:
        
        item_owner_follower_id = follower['user_id']
        print(item_owner_follower_id)
        
        user_feed_update_item = {}
        user_feed_update_item['user_id']  = item_owner_follower_id
        user_feed_update_item['owner_id'] = item_owner_id
        user_feed_update_item['item_id'] = item_id
        user_feed_update_list.append(user_feed_update_item)

    result.close()

    user_feed_table = Table('user_feed', metaData, autoload=True, autoload_with = engine)
    ins = user_feed_table.insert().values(user_id=bindparam('user_id'), owner_id=bindparam('owner_id'), item_id=bindparam('item_id'))
    engine.execute(ins, user_feed_update_list)
Developer: timewalker00, Project: gotwish-onitsuka-worker, Lines: 30, Source file: dao.py


Example 4: run

 def run(self):
     session = self.session()
     engine = session._WopMarsSession__session.bind
     conn = engine.connect()
     #
     snp2phenotype_path = self.input_file(InsertSNP2Phenotype.__input_file_snp2phenotype)
     snp_model = self.input_table(InsertSNP2Phenotype.__input_table_snp)
     phenotype_model = self.input_table(InsertSNP2Phenotype.__input_table_phenotype)
     snp2phenotype_model = self.output_table(InsertSNP2Phenotype.__output_table_snp2phenotype)
     snp2phenotype_df = pandas.read_table(snp2phenotype_path, header=None)
     #
     # read input file
     input_file_obj_list = []
     for line in csv.reader(open(snp2phenotype_path, 'r', encoding='utf-8'), delimiter="\t"):
         snp_rsid = int(line[0])
         phenotype_name = line[1]
         input_file_obj_list.append({'snp_rsid' : snp_rsid, 'phenotype_name' : phenotype_name})
     #
     # create insert
     snp_select = select([snp_model.id]).where(snp_model.rsid==bindparam('snp_rsid'))
     phenotype_select = select([phenotype_model.id]).where(phenotype_model.name==bindparam('phenotype_name'))
     output_table_insert = insert(table=snp2phenotype_model.__table__, values={'snp_id': snp_select, 'phenotype_id': phenotype_select})
     #
     if len(input_file_obj_list) > 0:
         if str(engine.__dict__['url']).split("://")[0]=='sqlite':
             engine.execute(output_table_insert.prefix_with("OR IGNORE", dialect='sqlite'), input_file_obj_list)
         elif str(engine.__dict__['url']).split("://")[0]=='mysql':
                 from warnings import filterwarnings # three lines to suppress mysql warnings
                 import MySQLdb as Database
                 filterwarnings('ignore', category = Database.Warning)
                 engine.execute(output_table_insert.prefix_with("IGNORE", dialect='mysql'), input_file_obj_list)
         elif str(engine.__dict__['url']).split("://")[0]=='postgresql':
             from sqlalchemy.dialects.postgresql import insert as pg_insert
             output_table_insert_pg = pg_insert(table=snp2phenotype_model.__table__, values={'snp_id': snp_select, 'phenotype_id': phenotype_select}).on_conflict_do_nothing(index_elements=['snp_id', 'phenotype_id'])
             engine.execute(output_table_insert_pg, input_file_obj_list)
Developer: aitgon, Project: wopmars, Lines: 35, Source file: InsertSNP2Phenotype.py


Example 5: visit_idea

 def visit_idea(self, idea, level, prev_result):
     if idea.short_title:
         self.counter.add_text(self.cleantext(idea.short_title), 2)
     if idea.long_title:
         self.counter.add_text(self.cleantext(idea.long_title))
     if idea.definition:
         self.counter.add_text(self.cleantext(idea.definition))
     if self.count_posts and level == 0:
         from .generic import Content
         related = text(
             Idea._get_related_posts_statement(),
             bindparams=[bindparam('root_idea_id', idea.id),
                         bindparam('discussion_id', idea.discussion_id)]
             ).columns(column('post_id')).alias('related')
         titles = set()
         # TODO maparent: Reoptimize
         for content in idea.db.query(
                 Content).join(
                 related, related.c.post_id == Content.id):
             body = content.body.first_original().value
             self.counter.add_text(self.cleantext(body), 0.5)
             title = content.subject.first_original().value
             title = self.cleantext(title)
             if title not in titles:
                 self.counter.add_text(title)
                 titles.add(title)
Developer: Lornz-, Project: assembl, Lines: 26, Source file: idea.py


Example 6: handle

    def handle(self, *args, **options):
        # set up
        config = get_config()
        if config is None:
            raise CommandError('Unable to process configuration file p_to_p.yml')

        connection = get_connection(config)
        pedsnet_session = init_pedsnet(connection)
        init_pcornet(connection)

        observation_period = pedsnet_session.query(ObservationPeriod.person_id,
                                                   ObservationPeriod.observation_period_start_date,
                                                   ObservationPeriod.observation_period_end_date,
                                                   ObservationPeriod.site,
                                                   bindparam("chart", 'Y'),
                                                   bindparam("enr_basis", 'E')
                                                   ).filter(
            exists().where(ObservationPeriod.person_id == PersonVisit.person_id)).all()

        odo(observation_period, Enrollment.__table__,
            dshape='var * {patid: string, enr_start_date: date, enr_end_date: date, site: string, chart: String, '
                   'enr_basis: String} '
            )
        # close session
        pedsnet_session.close()

        # output result
        self.stdout.ending = ''
        print('Enrollment ETL completed successfully', end='', file=self.stdout)
Developer: PEDSnet, Project: pedsnetcdm_to_pcornetcdm, Lines: 29, Source file: enrollmentETL.py


Example 7: demographic_etl

def demographic_etl(config):
    # set up
    connection = get_connection(config)
    pedsnet_session = init_pedsnet(connection)
    init_pcornet(connection)

    # multiple aliases for pedsnet_pcornet_valueset_map
    # to allow the three named joins
    gender_value_map = aliased(ValueSetMap)
    ethnicity_value_map = aliased(ValueSetMap)
    race_value_map = aliased(ValueSetMap)

    # extract the data from the person table
    person = pedsnet_session.query(Person.person_id,
                                   Person.birth_date,
                                   Person.birth_time,
                                   coalesce(gender_value_map.target_concept, 'OT'),
                                   coalesce(ethnicity_value_map.target_concept, 'OT'),
                                   coalesce(race_value_map.target_concept, 'OT'),
                                   bindparam("biobank_flag", "N"),
                                   Person.gender_source_value,
                                   Person.ethnicity_source_value,
                                   Person.race_source_value,
                                   Person.site,
                                   bindparam("gender_identity", None),
                                   bindparam("raw_gender_identity", None),
                                   bindparam("sexual_orientation", None),
                                   bindparam("raw_sexual_orientation", None)
                                   ). \
        outerjoin(gender_value_map,
                  and_(gender_value_map.source_concept_class == 'Gender',
                       case([(and_(Person.gender_concept_id == None,
                                   gender_value_map.source_concept_id == None), True)],
                            else_=cast(Person.gender_concept_id, String(200)) ==
                                  gender_value_map.source_concept_id))). \
        outerjoin(ethnicity_value_map,
                  and_(ethnicity_value_map.source_concept_class == 'Hispanic',
                       case([(and_(Person.ethnicity_concept_id == None,
                                   ethnicity_value_map.source_concept_id == None), True)],
                            else_=cast(Person.ethnicity_concept_id, String(200)) ==
                                  ethnicity_value_map.source_concept_id))). \
        outerjoin(race_value_map,
                  and_(race_value_map.source_concept_class == 'Race',
                       case([(and_(Person.race_concept_id == None,
                                   race_value_map.source_concept_id == None), True)],
                            else_=cast(Person.race_concept_id, String(200)) ==
                                  race_value_map.source_concept_id))).all()

    # transform data to pcornet names and types
    # load to demographic table
    odo(person, Demographic.__table__,
        dshape='var * {patid: string, birth_date: date, birth_time: string, sex: string,'
               'hispanic: string, race: string, biobank_flag: string, raw_sex: string,'
               'raw_hispanic: string, raw_race:string, site: string, gender_identity: string,'
               'raw_gender_identity: string, sexual_orientation: string, raw_sexual_orientation: string}'
        )
    # close session

    pedsnet_session.close()
Developer: PEDSnet, Project: pedsnetcdm_to_pcornetcdm, Lines: 59, Source file: demographicsETL.py


Example 8: insert_stock_data

def insert_stock_data(country, market_gsi):
    market_id = [ct[0] for ct in markets if ct[1] == market_gsi][0]
    insert_stmt = tc_company_stock_prices.insert().values(for_date=bindparam('DailyDate'),
                                                          market_id=bindparam('market_id'),
                                                          company_id=bindparam('CompanyID'),
                                                          open=bindparam('Open'),
                                                          max=bindparam('Max'),
                                                          min=bindparam('Min'),
                                                          close=bindparam('Close'),
                                                          volume=bindparam('Volume'),
                                                          amount=bindparam('Amount'))
    print(insert_stmt)

    result = _read_data(country + '.csv')

    for item in result:
        item['DailyDate'] = datetime.strptime(item['DailyDate'], "%Y-%m-%d").date()
        item['CompanyID'] = int(item['CompanyID'])
        item['Open'] = float(item['Open'])
        item['Close'] = float(item['Close'])
        item['Min'] = float(item['Min'])
        item['Max'] = float(item['Max'])
        item['Volume'] = int(item['Volume'])
        item['Amount'] = int(item['Amount'])
        item['market_id'] = market_id

    # for i, row in enumerate(result):
    #     print(row)
    #     if i == 10:
    #         break

    with engine.connect() as conn:
        conn.execute(insert_stmt, result)
Developer: adnanshussain, Project: FintechPy, Lines: 33, Source file: read_compute_load_historical_data.py


Example 9: run_letter

def run_letter(letter, session, doctype='grant'):
    schema = RawLawyer
    if doctype == 'application':
        schema = App_RawLawyer
    letter = letter.upper()
    clause1 = schema.organization.startswith(bindparam('letter',letter))
    clause2 = schema.name_first.startswith(bindparam('letter',letter))
    clauses = or_(clause1, clause2)
    lawyers = (lawyer for lawyer in session.query(schema).filter(clauses))
    block = clean_lawyers(lawyers)
    create_jw_blocks(block)
    create_lawyer_table(session)
Developer: Grace, Project: patentprocessor, Lines: 12, Source file: lawyer_disambiguation.py


Example 10: run_letter

def run_letter(letter, session, doctype='grant'):
    schema = RawAssignee
    if doctype == 'application':
        schema = App_RawAssignee
    letter = letter.upper()
    clause1 = schema.organization.startswith(bindparam('letter',letter))
    clause2 = schema.name_first.startswith(bindparam('letter',letter))
    clauses = or_(clause1, clause2)
    assignees = (assignee for assignee in session.query(schema).filter(clauses))
    block = clean_assignees(assignees)
    create_jw_blocks(block)
    create_assignee_table(session)
Developer: phestoem, Project: patentprocessor, Lines: 12, Source file: assignee_disambiguation.py


Example 11: get_context_data

    def get_context_data(self, **kwargs):
        filter_form = ProgressFilterForm(request.args)

        conclusion_type = filter_form.conclusion.data
        dataset = filter_form.dataset
        status_level = self.model_eu_cls.conclusion_status_level2
        label_type = self.TREND_LABEL
        species = []
        if conclusion_type:
            if conclusion_type == 'bs':
                status_level = self.model_eu_cls.conclusion_status_level1
                conclusion_value = self.model_eu_cls.conclusion_status_label
                label_type = self.STATUS_LABEL
            elif conclusion_type == 'stbp':
                conclusion_value = self.model_eu_cls.br_population_trend
            elif conclusion_type == 'ltbp':
                conclusion_value = self.model_eu_cls.br_population_trend_long
            elif conclusion_type == 'stwp':
                conclusion_value = self.model_eu_cls.wi_population_trend
            elif conclusion_type == 'ltwp':
                conclusion_value = self.model_eu_cls.wi_population_trend_long
            else:
                raise ValueError('Unknown conclusion type')
            eu_species = self.get_species_qs(dataset,
                                             conclusion_value,
                                             status_level)

            ignore_species = (
                self.model_eu_cls.query
                .with_entities(self.model_eu_cls.speciescode)
            )
            ms_species = (
                LuDataBird.query
                .filter(~LuDataBird.speciescode.in_(ignore_species))
                .filter_by(dataset=dataset)
                .with_entities(LuDataBird.speciescode.label('code'),
                               LuDataBird.speciesname.label('name'),
                               bindparam('conclution', ''),
                               bindparam('status', ''),
                               bindparam('additional_record', 0))
            )

            species = sorted(eu_species.union(ms_species),
                             key=lambda x: x.name)

        return {
            'filter_form': filter_form,
            'species': species,
            'current_selection': filter_form.get_selection(),
            'dataset': dataset,
            'label_type': label_type,
        }
Developer: eea, Project: art12-viewer, Lines: 52, Source file: views.py


Example 12: upgrade_severity_levels

def upgrade_severity_levels(session, severity_map):
    """
    Updates the potentially changed severities at the reports.
    """
    LOG.debug("Upgrading severity levels started...")

    # Create a sql query from the severity map.
    severity_map_q = union_all(*[
        select([cast(bindparam('checker_id' + str(i), str(checker_id))
                .label('checker_id'), sqlalchemy.String),
                cast(bindparam('severity' + str(i), Severity._NAMES_TO_VALUES[
                    severity_map[checker_id]])
               .label('severity'), sqlalchemy.Integer)])
        for i, checker_id in enumerate(severity_map)]) \
        .alias('new_severities')

    checker_ids = severity_map.keys()

    # Get checkers whose severity has changed.
    changed_checker_q = select([Report.checker_id, Report.severity]) \
        .group_by(Report.checker_id, Report.severity) \
        .where(Report.checker_id.in_(checker_ids)) \
        .except_(session.query(severity_map_q)).alias('changed_severites')

    changed_checkers = session.query(changed_checker_q.c.checker_id,
                                     changed_checker_q.c.severity)

    # Update severity levels of checkers.
    if changed_checkers:
        updated_checker_ids = set()
        for checker_id, severity_old in changed_checkers:
            severity_new = severity_map.get(checker_id, 'UNSPECIFIED')
            severity_id = Severity._NAMES_TO_VALUES[severity_new]

            LOG.info("Upgrading severity level of '%s' checker from %s to %s",
                     checker_id,
                     Severity._VALUES_TO_NAMES[severity_old],
                     severity_new)

            if checker_id in updated_checker_ids:
                continue

            session.query(Report) \
                .filter(Report.checker_id == checker_id) \
                .update({Report.severity: severity_id})

            updated_checker_ids.add(checker_id)

        session.commit()

    LOG.debug("Upgrading of severity levels finished...")
Developer: Ericsson, Project: codechecker, Lines: 51, Source file: db_cleanup.py


Example 13: db_operates

def db_operates(action, conn, tbl, rows, pk=['id']):
	if rows is None or len(rows)==0: return 0
	cnt = 0
	if action in ('del', 'mod'):
		# generate where clause
		u_where_params = []
		for o in pk: 
			if action=='mod': u_where_params.append(tbl.c[o]==bindparam('_'+o))
			else: u_where_params.append(tbl.c[o]==bindparam(o))
		u_where_clause = and_(*u_where_params)

	if action=='add':
		if len(rows)==1:
			respxy = conn.execute(tbl.insert(), rows[0])
			for idx in xrange(0, len(pk)):
				rows[0][pk[idx]]=respxy.inserted_primary_key[idx]
		else:
			respxy = conn.execute(tbl.insert(), rows)

		cnt = respxy.rowcount
	elif action=='mod':
		# generate values params
		u_value_keys = {}
		def prepare_columns(t_k, row_):
			for k in row_.keys():
				if tbl.columns.has_key(k) and not k in pk: 
					if u_value_keys.has_key(k):
						t_k[k] = u_value_keys[k]
					else:
						t_k[k] = u_value_keys[k] = bindparam(k)

		# preparation for key=id
		t_u_value_keys = {}
		for row in rows:
			prepare_columns(t_u_value_keys, row)
			for k in row.keys(): 
				if k in pk: row['_'+k]=row[k]
			st = tbl.update().where(u_where_clause).values(**t_u_value_keys)
			respxy = conn.execute(st, [row])
			cnt += respxy.rowcount
			t_u_value_keys.clear()
			del st
		# reset for key=id
		for row in rows:
			for k in row.keys():
				if k in pk: del row['_'+k]
	elif action=='del':
		respxy = conn.execute(tbl.delete().where(u_where_clause), rows)
		cnt = respxy.rowcount

	return cnt
Developer: WeiHsinChen, Project: RecommendationAPI, Lines: 51, Source file: utils.py


Example 14: get_user_id

def get_user_id(email = None, session_id = None):
    """ Helper function that returns the user_id for a given email address """
    if email is not None:
        result = db.session.execute(
            text("SELECT aaa.get_user_id_by_email(:email)",
                 bindparams=[bindparam('email', email)]))
        return result.first()[0]

    if session_id is not None:
        result = db.session.execute(
            text("SELECT aaa.get_user_id_by_session_id(:session)",
                 bindparams=[bindparam('session', session_id)]))
        return result.first()[0]
    return None
Developer: Mondego, Project: pyreco, Lines: 14, Source file: allPythonContent.py


Example 15: _update

    def _update(self, type, offset, values):
        """

        :param type: The type prefix to use
        :param offset: The address offset to start at
        :param values: The values to set
        """
        context = self._build_set(type, offset, values, prefix='x_')
        query = self._table.update().values(value='value')
        query = query.where(and_(
            self._table.c.type == bindparam('x_type'),
            self._table.c.index == bindparam('x_index')))
        result = self._connection.execute(query, context)
        return result.rowcount == len(values)
Developer: bashwork, Project: pymodbus, Lines: 14, Source file: sql_datastore.py


Example 16: insert_splits_data

def insert_splits_data():
    stmt_insert_splits = splits_table.insert().values(split_id=bindparam('SplitID'), split_date=bindparam('SplitDate', type_=Date),
                                                      company_id=bindparam('CompanyID'), ratio=bindparam('Ratio;'))
    print(stmt_insert_splits)

    result = _read_data('splits.csv')
    for item in result:
        item['SplitDate'] = datetime.strptime(item['SplitDate'], "%Y-%m-%d").date()
        item['Ratio;'] = float(item['Ratio;'][0:len(item['Ratio;']) - 1])

    # for row in result:
    #     print(row)

    with engine.connect() as conn:
        conn.execute(stmt_insert_splits, result)
Developer: adnanshussain, Project: FintechPy, Lines: 15, Source file: read_compute_load_historical_data.py


Example 17: handle

    def handle(self, *args, **options):
        # set up
        config = get_config()
        if config is None:
            raise CommandError('Unable to process configuration file p_to_p.yml')

        connection = get_connection(config)
        pedsnet_session = init_pedsnet(connection)
        init_pcornet(connection)
        init_vocab(connection)

        pedsnet_pcornet_valueset_map = aliased(ValueSetMap)

        # extract the data from the death table
        death_cause = pedsnet_session.query(DeathPedsnet.person_id,
                                            func.left(DeathPedsnet.cause_source_value, 8),
                                            coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT'),
                                            bindparam("death_cause_type", "NI"),
                                            bindparam("death_cause_source", "L"),
                                            bindparam("death_cause_confidence", None),
                                            min(DeathPedsnet.site)
                                            ) \
            .join(Demographic, Demographic.patid == cast(DeathPedsnet.person_id, String(256)), ) \
            .join(VocabularyConcept, VocabularyConcept.concept_id == DeathPedsnet.cause_concept_id) \
            .outerjoin(pedsnet_pcornet_valueset_map,
                       and_(pedsnet_pcornet_valueset_map.source_concept_class == 'death cause code',
                            cast(VocabularyConcept.vocabulary_id, String(200)) ==
                            pedsnet_pcornet_valueset_map.source_concept_id)) \
            .filter(and_(DeathPedsnet.cause_source_value != None,
                         DeathPedsnet.cause_source_concept_id != 44814650)) \
            .group_by(DeathPedsnet.person_id, func.left(DeathPedsnet.cause_source_value, 8),
                      coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT')) \
            .all()

        # transform data to pcornet names and types
        # load to demographic table
        odo(death_cause, DeathCause.__table__,
            dshape='var * {patid: string, death_cause: string, death_cause_code: string,'
                   'death_cause_type: string, death_cause_source:string, '
                   'death_cause_confidence: string, site: string}'
            )

        # close session
        pedsnet_session.close()

        # output result
        self.stdout.ending = ''
        print('Death Cause ETL completed successfully', end='', file=self.stdout)
Developer: PEDSnet, Project: pedsnetcdm_to_pcornetcdm, Lines: 48, Source file: deathCauseETL.py


Example 18: list_unlocked_replicas

def list_unlocked_replicas(rse, limit, bytes=None, rse_id=None, worker_number=None, total_workers=None, delay_seconds=0, session=None):
    """
    List RSE File replicas with no locks.

    :param rse: the rse name.
    :param bytes: the amount of needed bytes.
    :param session: The database session in use.

    :returns: a list of dictionary replica.
    """
    if not rse_id:
        rse_id = get_rse_id(rse=rse, session=session)

    # filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED).\
    none_value = None  # Hack to get pep8 happy...
    query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone).\
        filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\
        filter(models.RSEFileAssociation.lock_cnt == 0).\
        filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\
        filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE)),
                   and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\
        order_by(models.RSEFileAssociation.tombstone).\
        with_hint(models.RSEFileAssociation, "INDEX(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle')

    if worker_number and total_workers and total_workers - 1 > 0:
        if session.bind.dialect.name == 'oracle':
            bindparams = [bindparam('worker_number', worker_number - 1), bindparam('total_workers', total_workers - 1)]
            query = query.filter(text('ORA_HASH(name, :total_workers) = :worker_number', bindparams=bindparams))
        elif session.bind.dialect.name == 'mysql':
            query = query.filter('mod(md5(name), %s) = %s' % (total_workers - 1, worker_number - 1))
        elif session.bind.dialect.name == 'postgresql':
            query = query.filter('mod(abs((\'x\'||md5(name))::bit(32)::int), %s) = %s' % (total_workers - 1, worker_number - 1))

    query = query.limit(limit)

    rows = list()
    neededSpace = bytes
    totalbytes = 0
    for (scope, name, bytes, tombstone) in query.yield_per(1000):

        if tombstone != OBSOLETE and neededSpace is not None and totalbytes >= neededSpace:
            break

        d = {'scope': scope, 'name': name, 'bytes': bytes}
        rows.append(d)
        if tombstone != OBSOLETE:
            totalbytes += bytes
    return rows
Developer: pombredanne, Project: rucio, Lines: 48, Source file: replica.py


Example 19: prepare_columns

		def prepare_columns(t_k, row_):
			for k in row_.keys():
				if tbl.columns.has_key(k) and not k in pk: 
					if u_value_keys.has_key(k):
						t_k[k] = u_value_keys[k]
					else:
						t_k[k] = u_value_keys[k] = bindparam(k)
Developer: WeiHsinChen, Project: RecommendationAPI, Lines: 7, Source file: utils.py


Example 20: update_column_mappings

def update_column_mappings(rule_map_list, ref_table_name):
    '''
    Loop through the column mapping rows in the database and populate the
    stored procedure column based on the transformation name.
    @param rule_map_list: A list of tuples containing mapping info. Tuples should be: (rule_name, proc_name)
    @param ref_table_name: the name of the reference table containing the column mapping info
    '''

    # check that list is not empty before proceeding.
    if not rule_map_list:
        print('NO FUNCTIONS ADDED TO DATABASE')
        return
    with get_udl_connection() as conn:
        # get column_mapping table object
        col_map_table = conn.get_table(ref_table_name)

        # Generate sql to perform update
        update_stmt = col_map_table.update().where(col_map_table.c.transformation_rule == bindparam('rule_name'))
        update_stmt = update_stmt.values(stored_proc_name=bindparam('proc_name'), stored_proc_created_date=datetime.datetime.now())

        # Create list of dicts that sqlalchemy will recognize
        # to update all rules with corresponding stored procedure.
        for pair in rule_map_list:
            conn.execute(update_stmt, rule_name=pair[0], proc_name=pair[1])
Developer: SmarterApp, Project: RDW_DataWarehouse, Lines: 27, Source file: populate_ref_info.py



Note: The sqlalchemy.sql.expression.bindparam function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. For redistribution and use, please refer to each project's License; do not republish without permission.

