This article collects typical usage examples of the Python sqlalchemy.ext.sqlsoup.SqlSoup class. If you are wondering what the SqlSoup class does, or how and where to use it, the curated class examples below may help.
The following presents 20 code examples of the SqlSoup class, ordered by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
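Before the project-specific examples, here is a minimal sketch of typical SqlSoup usage. The database file and the table/column names (test.db, users, name, email) are assumptions chosen purely for illustration; note also that SqlSoup was later removed from SQLAlchemy and now lives in the standalone sqlsoup package, so the import path below only applies to older SQLAlchemy versions.

# A minimal SqlSoup sketch; "test.db", "users", "name" and "email" are invented.
from sqlalchemy.ext.sqlsoup import SqlSoup

db = SqlSoup("sqlite:///test.db")                        # connect; tables are reflected on demand
all_users = db.users.all()                               # every row of the "users" table, mapped to objects
alice = db.users.filter(db.users.name == "alice").first()
db.users.insert(name="bob", email="bob@example.com")     # stage a new row
db.flush()                                               # write pending changes to the database
count = db.execute("SELECT count(*) FROM users").scalar()  # raw SQL is also available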
Example 1: __init__
def __init__(self, billings_path):
    db_path = os.path.abspath(os.path.join(billings_path, "Database", "billings.bid"))
    uri = "sqlite:///" + db_path
    SqlSoup.__init__(self, uri)
    self.setup_constants()
    self.setup_relations()
    self.setup_column_introspection()
Developer ID: 11craft, Project: blib, Lines of code: 7, Source: database.py
Example 2: raw_sql
def raw_sql(sql):
    engine = create_engine(
        "mysql://%s:%s@%s/%s"
        % (CLOUD_MONITOR_MYSQL_USERNAME, CLOUD_MONITOR_MYSQL_PASSWORD, CLOUD_MONITOR_MYSQL_HOST, CLOUD_MONITOR_MYSQL_DB)
    )
    db = SqlSoup(engine)
    return db.execute(sql).fetchall()
Developer ID: shelmesky, Project: cloud_manage_server, Lines of code: 7, Source: db_rawsql.py
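A hypothetical call of the raw_sql() helper above; the SQL statement and the cloud_host table are invented for illustration, and the CLOUD_MONITOR_* constants must already be defined in the module:

# Hypothetical usage of raw_sql(); "cloud_host" is an invented table name.
rows = raw_sql("SELECT id, hostname FROM cloud_host WHERE state = 'active'")
for row in rows:
    print row.id, row.hostname  # result rows allow attribute access by column name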
Example 3: rest_service
def rest_service(request, service, version, resource):
    db = SqlSoup('postgres://%s/%s' % ('localhost', 'mypam'))
    method = request.method
    rp = db.execute("""SELECT rest.datasource.url, rest.resource.query, standard.array_accum( rest.parameter.name ) FROM rest.datasource
                       JOIN rest.service ON rest.service.datasource_id=rest.datasource.id
                       JOIN rest.version ON rest.version.service_id=rest.service.id
                       JOIN rest.resource ON rest.resource.version_id=rest.version.id
                       LEFT JOIN rest.parameter ON rest.parameter.resource_id=rest.resource.id
                       WHERE rest.service.name=:service AND rest.version.version=:version AND rest.resource.name=:resource AND rest.resource.method=:method
                       GROUP BY rest.datasource.url, rest.resource.query""",
                    params={'service': service, 'version': version, 'resource': resource, 'method': method})
    row = rp.first()
    if row != None:
        (datasource_url, query, parameters) = row
        service_db = SqlSoup(datasource_url)
        query_params = {}
        for parameter in parameters:
            if parameter != None:
                query_params[parameter] = request.REQUEST.get(parameter)
        try:
            query_rp = db.execute(query, params=query_params)
            query_row = query_rp.first()
        except Exception as exc:
            return HttpResponseBadRequest(str(exc), content_type="text/plain")
        if query_row != None:
            print query_row
            return HttpResponse(query_row[0], content_type="text/plain")
    return HttpResponseNotFound("service=[%s] version=[%s] resource=[%s] not found" % (service, version, resource),
                                content_type="text/plain")
Developer ID: blorticus, Project: mypam, Lines of code: 29, Source: views.py
Example 4: getSupression
def getSupression():
    global db_url
    global supressions
    global ports
    global ip_ports
    # This is to get all guarantees from the sql in the dwarf-server
    options = {"sql_connection": db_url}
    db = SqlSoup(options["sql_connection"])
    sup_info = db.supression.all()
    c_time = int(time.time())
    db.commit()
    for sup in sup_info:
        src_ip = sup.src_ip
        port_name = ip_ports[src_ip]["port_name"]
        print "getting db"
        print port_name
        for pid in ports:
            if port_name == ports[pid].port_name:
                print "this supression is mine"
                supress = sup.supression
                o_time = sup.time
                if port_name in supressions:
                    if c_time < (o_time + 10):
                        print "wow, a new one!"
                        supressions[port_name] = supress
                    else:
                        del supressions[port_name]
Developer ID: HolySparky, Project: dwarf, Lines of code: 27, Source: another_agent.py
Example 5: scrape_vimeo
def scrape_vimeo(query):
    # Extract a UTC datetime object
    utc_datetime = datetime.datetime.utcnow()
    # Connect to the DB; if we can't, this will fail anyway.
    db = SqlSoup(dbconfig.dburl)
    page = urllib2.urlopen(query).read()
    soup = BeautifulSoup(page)
    count_regex = re.compile('\+ Browse all ([0-9.]+)K videos')
    for license_code in license_codes:
        license = 'http://creativecommons.org/licenses/%s/3.0/' % license_code
        count_href = '/creativecommons/' + license_code
        count_text = soup.find('a', 'more', href=count_href).string
        count = count_regex.match(count_text).group(1)
        if count:
            # Vimeo notates number of videos as [N]K, where N may be a floating
            # point number, but the true number will never be floating, so just
            # do the multiplication, then convert it to an integer.
            real_count = int(float(count) * 1000)
            # Build a db row
            row = {}
            row['utc_time_stamp'] = utc_datetime
            row['site'] = 'http://vimeo.com/'
            row['license_uri'] = license
            row['count'] = real_count
            # Insert count into site_specific table
            db.site_specific.insert(**row)
            db.flush()
        else:
            raise Exception, "No count found at Vimeo for license: " + license
Developer ID: cc-archive, Project: stats, Lines of code: 31, Source: nightly_import_scrape.py
Example 6: plominoSqlSync
def plominoSqlSync(session, plominoDocument, **table_infos):
    """
    table_infos = dict(schema='<schema_table>')
    """
    if isinstance(session, basestring):
        session = named_scoped_session(session)
    engine = session.get_bind()
    db = SqlSoup(engine, session=session)
    table_name = plominoDocument.Form
    main_table = db.entity(table_name, **table_infos)
    values = dict()
    plominoItems = plominoDocument.getItems()
    for column in main_table.c:
        if column.key == u"id":
            continue
        if column.key in plominoItems and (plominoDocument.getItem(column.key, None) != None):
            values[column.key] = plominoDocument.getItem(column.key)
    plominoDatabase = plominoDocument.getParentDatabase()
    values[u"modified_by"] = plominoDatabase.getCurrentUser().id
    values[u"last_update"] = plominoDocument.plomino_modification_time.asdatetime()
    if plominoDocument.isNewDocument():
        values[u"plominoId"] = plominoDocument.id
        main_table.insert(**values)
    else:
        if not main_table.filter(main_table.plominoId == plominoDocument.id).update(values):
            values[u"plominoId"] = plominoDocument.id
            main_table.insert(**values)
    plominoForm = plominoDatabase.getForm(plominoDocument.Form)
    plominoFields = plominoForm.getFormFields(includesubforms=True, applyhidewhen=True)
    for field in plominoFields:
        if field.getFieldType() in ("DATAGRID",) and (plominoDocument.getItem(field.id) != None):
            gridItem = plominoDocument.getItem(field.id)
            grid_table_name = field.id  # or field.getSettings(key='associated_form')
            try:
                grid_table = db.entity(grid_table_name, **table_infos)
            except NoSuchTableError, err:
                pass
            else:
                grid_table.filter(grid_table.parentId == plominoDocument.id).delete()
                vals = dict()
                for record in gridItem:
                    field_mapping = field.getSettings(key="field_mapping").split(",")
                    for idx, key in enumerate(field_mapping):
                        if record[idx] != None:
                            vals[key] = record[idx]
                    vals[u"parentId"] = plominoDocument.id
                    grid_table.insert(**vals)
Developer ID: silviot, Project: gisweb.utils, Lines of code: 57, Source: db_utils.py
Example 7: testconnection
def testconnection(self, param):
    '''
    This function lets you test the to be saved SQL connection.

    :param param: A dictionary with all necessary parameters to test the connection.
    :type param: dict
    :return: Tuple of success and a description
    :rtype: (bool, string)

    Parameters are: Server, Driver, Database, User, Password, Port, Limit, Table, Map,
    Where, Encoding, conParams
    '''
    num = -1
    desc = None

    port = ""
    password = ""
    conParams = ""
    if param.get("Port"):
        port = ":%s" % param.get("Port")
    if param.get("Password"):
        password = ":%s" % param.get("Password")
    if param.get("conParams"):
        conParams = "?%s" % param.get("conParams")
    connect_string = "%s://%s%s%s%s%s/%s%s" % (param.get("Driver"),
                                               param.get("User"),
                                               password,
                                               "@" if (param.get("User") or password) else "",
                                               param.get("Server"),
                                               port,
                                               param.get("Database"),
                                               conParams)
    log.info("using the connect string %s" % connect_string)
    engine = create_engine(connect_string)
    # create a configured "Session" class
    session = sessionmaker(bind=engine)()
    db = SqlSoup(engine)
    TABLE = db.entity(param.get("Table"))

    try:
        result = session.query(TABLE).count()
        num = result
        desc = "Found %i users." % num
    except Exception as exx:
        desc = "failed to retrieve users: %s" % exx

    return (num, desc)
Developer ID: asifiqbal, Project: privacyidea, Lines of code: 50, Source: SQLIdResolver.py
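A hypothetical input for the testconnection() method above. Every value below is invented; only the keys follow the docstring, and resolver stands for an already constructed instance of the resolver class this method belongs to:

# Hypothetical parameter dictionary; all values are made up for illustration.
param = {
    "Driver": "mysql",
    "Server": "db.example.com",
    "Port": "3306",
    "Database": "userstore",
    "User": "resolver",
    "Password": "secret",
    "Table": "users",
}
num, desc = resolver.testconnection(param)  # e.g. (42, "Found 42 users.")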
Example 8: migrateVersion3
def migrateVersion3():
    log.info('Upgrading DB to version 3.')

    # for some normal executions
    db = SqlSoup(engine)
    try:
        db.execute('ALTER TABLE Movie ADD dateChanged TIMESTAMP')
        log.info('Added dateChanged to Movie table')
    except OperationalError:
        log.debug('Column dateChanged already added')

    Session.add(DbVersion(3))
    Session.flush()
Developer ID: Amelandbor, Project: CouchPotato, Lines of code: 14, Source: db.py
Example 9: main
def main():
    db = SqlSoup(dbconfig.dburl)
    for filename in sorted(glob.glob(FLICKR_DATA_BASE_PATH + '/*.csv')):
        if 'cumulative' in filename:
            continue
        utc_date_time = filename2utc_datetime(filename)
        csv_fd = csv.reader(open(filename))
        print 'Importing', filename,
        for row in csv_fd:
            importable = csv_row2dict(row, utc_date_time)
            db.site_specific.insert(**importable)
            db.flush()
        # since that worked, rename the filename
        os.rename(filename, filename + '.imported')
        print '...done.'
Developer ID: cc-archive, Project: stats, Lines of code: 15, Source: csv2flickrdb.py
Example 10: _sql_soup_init
def _sql_soup_init(self, schema):
    """
    Gets the schema to connect
    to the db, creates a SqlSoup instance, sets the schema

    :param schema: database schema
    :type schema: str
    """
    # be sure that autoflushing/expire_on_commit/autocommit are false
    soup_db = SqlSoup(self.engine,
                      session=scoped_session(sessionmaker(autoflush=False,
                                                          expire_on_commit=False,
                                                          autocommit=False)))
    soup_db.schema = schema
    soup_db.catalog.relate('surface', soup_db.surface)
    soup_db.catalog.relate('magnitude', soup_db.magnitude)
    return soup_db
Developer ID: danciul, Project: openquake, Lines of code: 16, Source: loader.py
Example 11: connectDbRemote
def connectDbRemote(self):
    """ Establishes the connection to the remote DB """
    SessionRemote = None
    self.metaRemote = None
    # self.sessionRemote.close_all()
    # self.engine.dispose()  # added this
    # del self.sessionRemote
    # del self.engine
    mainschema_remoto = Environment.conf.SincroDB.mainschema_remoto
    user_remoto = Environment.conf.SincroDB.user_remoto
    password_remoto = Environment.conf.SincroDB.password_remoto
    host_remoto = Environment.conf.SincroDB.host_remoto
    port_remoto = Environment.conf.SincroDB.port_remoto
    database_remoto = Environment.conf.SincroDB.database_remoto
    engine = create_engine('postgres:' + '//'
                           + user_remoto + ':'
                           + password_remoto + '@'
                           + host_remoto + ':'
                           + port_remoto + '/'
                           + database_remoto,
                           encoding='utf-8',
                           convert_unicode=True)
    tipo_eng = engine.name
    engine.echo = False
    self.metaRemote = MetaData(engine)
    self.pg_db_server_remote = SqlSoup(self.metaRemote)
    self.pg_db_server_remote.schema = Environment.params["schema"]
    self.pg_db_server_main_remote = SqlSoup(self.metaRemote)
    self.pg_db_server_main_remote.schema = mainschema_remoto
    SessionRemote = scoped_session(sessionmaker(bind=engine))
    self.sessionRemote = SessionRemote()
    print ">>>> CONNESSO AL DB REMOTO : %s IP: %s PORTA: %s SCHEMA %s <<<<< " % (database_remoto, host_remoto, port_remoto, Environment.params["schema"])
Developer ID: fmarella, Project: pg3, Lines of code: 33, Source: SincroDB.py
Example 12: GenericFillData
class GenericFillData(object):

    def __init__(self):
        self.azienda_destinazione = "latelier"
        self.database = "promogest_db"
        self.port = "5432"
        self.user = "promoadmin"
        self.password = "admin"
        self.host = "localhost"
        self.file_csv = "aliquota_iva.csv"
        self.mainSchema = "promogest2"
        self.connection()

    def connection(self):
        engine = create_engine('postgres:' + '//' + self.user + ':'
                               + self.password + '@'
                               + self.host + ':'
                               + self.port + '/'
                               + self.database,
                               encoding='utf-8',
                               convert_unicode=True)
        engine.echo = True
        meta = MetaData(engine)
        self.pg_db_dest = SqlSoup(meta)
        self.pg_db_dest.schema = self.azienda_destinazione
        self.readFile()

    def readFile(self):
        spamReader = csv.reader(open(self.file_csv), delimiter=';', quotechar='"')
        self.fillDataContact(spamReader)

    def fillDataContact(self, spamReader):
        for row in spamReader:
            _art = self.pg_db_dest.aliquota_iva()
            _art.id = row[0]
            _art.denominazione_breve = row[1]
            _art.denominazione = row[2]
            _art.percentuale = row[3]
            _art.percentuale_detrazione = row[4]
            _art.descrizione_detrazione = row[5]
            _art.id_tipo = row[6]
            sqlalchemy.ext.sqlsoup.Session.add(_art)
            sqlalchemy.ext.sqlsoup.Session.commit()
        self.pg_db_dest.flush()
Developer ID: Alwnikrotikz, Project: promogest, Lines of code: 46, Source: importAliquotaIva.py
Example 13: main
def main(infd, unix_time, dry_run=False):
    # Extract a UTC datetime object
    utc_datetime = datetime.datetime.utcfromtimestamp(unix_time)
    # Connect to the DB; if we can't, this will fail anyway.
    db = SqlSoup(dbconfig.dburl)
    # Scrape the results we just wgetted
    license2count = parse(infd)
    # Prepare any remaining DB columns, and write a CSV summary file
    extra_data = {
        'utc_time_stamp': utc_datetime,
        'site': 'http://www.flickr.com/'}
    importable = []
    csv_values = {}
    license2flickr = dict((v, k) for k, v in flickr2license.items())
    for key in license2count:
        row = {}
        row.update(extra_data)
        row['count'] = license2count[key]
        row['license_uri'] = key
        importable.append(row)
        csv_values[license2flickr[key]] = row['count']
    if dry_run:
        print importable
        print csv_values
    else:
        # Write data to CSV file
        csv = open('./data/%s.csv.imported' % utc_datetime.date(), 'w')
        for license, count in csv_values.items():
            csv.write('%s,%d\n' % (license, count))
        csv.close()
        # Write data to database
        counts = {}
        for row in importable:
            db.site_specific.insert(**row)
            db.flush()
            counts[row['license_uri'].split('/')[4]] = row['count']
        # Sort by license code
        lic_sorted = sorted(counts)
        # Join counts (sorted) with a comma
        cnt_sorted = ','.join(map(str, [counts[key] for key in lic_sorted]))
        # Write csv data to big historical file. WARNING: this presupposes
        # that license version and jurisdiction never change on Flickr
        hist = open('./data/counts-historical-Flickr.csv', 'a')
        hist.write(str(utc_datetime.date()) + ',Flickr,2.0,Unported,' + cnt_sorted + '\n')
        hist.close()
Developer ID: cc-archive, Project: stats, Lines of code: 46, Source: nightly_import_scrape.py
Example 14: start
def start(self):
    super(MySQLBind9Backend, self).start()

    if cfg.CONF[self.name].write_database:
        self._engine = get_engine(self.name)
        self._db = SqlSoup(self._engine)

        self._sync_domains()
Developer ID: joeracker, Project: designate, Lines of code: 8, Source: impl_mysqlbind9.py
Example 15: loadConfig
def loadConfig(self, config, conf):
    '''
    Load the config from conf.

    :param config: The configuration from the Config Table
    :type config: dict
    :param conf: the instance of the configuration
    :type conf: string

    The information which config entries we need to load is taken from
    manage.js: function save_sql_config
    '''
    self.resolverId = conf
    self.server = self.getConfigEntry(config, 'privacyidea.sqlresolver.Server', conf)
    self.driver = self.getConfigEntry(config, 'privacyidea.sqlresolver.Driver', conf)
    self.database = self.getConfigEntry(config, 'privacyidea.sqlresolver.Database', conf)
    self.port = self.getConfigEntry(config, 'privacyidea.sqlresolver.Port', conf, required=False)
    self.limit = self.getConfigEntry(config, 'privacyidea.sqlresolver.Limit', conf, required=False, default=100)
    self.user = self.getConfigEntry(config, 'privacyidea.sqlresolver.User', conf)
    self.password = self.getConfigEntry(config, 'privacyidea.sqlresolver.Password', conf, required=False)
    self.table = self.getConfigEntry(config, 'privacyidea.sqlresolver.Table', conf)
    usermap = self.getConfigEntry(config, 'privacyidea.sqlresolver.Map', conf)
    self.map = yaml.load(usermap)
    self.reverse_map = dict([[v, k] for k, v in self.map.items()])
    self.where = self.getConfigEntry(config, 'privacyidea.sqlresolver.Where', conf, required=False)
    self.encoding = self.getConfigEntry(config, 'privacyidea.sqlresolver.Encoding', conf, required=False, default="latin1")
    self.conParams = self.getConfigEntry(config, 'privacyidea.sqlresolver.conParams', conf, required=False)

    # create the connectstring like
    # driver://user:password@server/database?conParams
    port = ""
    password = ""
    if self.port:
        port = ":%s" % self.port
    if self.password:
        password = ":%s" % self.password
    self.connect_string = "%s://%s%s@%s%s/%s?%s" % (self.driver,
                                                    self.user,
                                                    password,
                                                    self.server,
                                                    port,
                                                    self.database,
                                                    self.conParams)
    log.info("using the connect string %s" % self.connect_string)
    self.engine = create_engine(self.connect_string, encoding=str(self.encoding))

    # create a configured "Session" class
    Session = sessionmaker(bind=self.engine)
    # create a Session
    self.session = Session()
    self.db = SqlSoup(self.engine)
    self.TABLE = self.db.entity(self.table)

    return self
Developer ID: asifiqbal, Project: privacyidea, Lines of code: 56, Source: SQLIdResolver.py
Example 16: suggestFromTable
def suggestFromTable(sessionname, name, columnname, others=[], schema="public", tip="", **filters):
    """
    Useful for implementing simple auto-suggest services backed by a table.

    sessionname: name of the session
    name: name of the table
    columnname: name of the column to draw suggestions from
    others: other columns whose values are of interest. Use '' for all of them.
    schema: name of the schema the table belongs to
    tip: the "hint" to complete
    filters: additional filters of the form <key>=<value>
    """
    session = get_session(sessionname)
    engine = session.get_bind()
    db = SqlSoup(engine)
    table = db.entity(name, schema=schema)
    if isinstance(others, (list, tuple)):
        # you can submit a list of column values to return, at least an empty list
        query = session.query(*[table.c[col] for col in [columnname] + list(others)])
    elif others == "filters_only":
        # easter egg: useful?
        query = session.query(*[table.c[col] for col in [columnname] + filters.keys()])
    else:
        # otherwise all columns will be returned
        query = table
    column = table.c[columnname]
    tip = tip.rstrip()
    # ilike '%(tip)s%%'
    where = or_(
        column.startswith(tip),
        column.startswith(tip.capitalize()),
        column.startswith(tip.title()),
        column.startswith(tip.lower()),
        column.startswith(tip.upper()),
    )
    # other simple filters
    where = and_(where, *[(table.c[k] == v) for k, v in filters.items() if k in table.c.keys()])
    return [row.__dict__ for row in query.filter(where).all()]
Developer ID: silviot, Project: gisweb.utils, Lines of code: 42, Source: db_utils.py
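A hypothetical call of the suggestFromTable() helper above; the session name, table, columns, and extra filter are all invented for illustration:

# Hypothetical auto-suggest lookup: rows whose "surname" starts with "ros"
# in any capitalization, restricted to active records.
matches = suggestFromTable(
    "mysession",            # invented session name
    "contacts",             # invented table in schema "public"
    "surname",              # column the suggestions are drawn from
    others=["name", "id"],  # also return these column values
    tip="ros",
    active=True,            # extra <key>=<value> filter on an assumed "active" column
)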
Example 17: migrateVersion4
def migrateVersion4():
    log.info('Upgrading DB to version 4.')

    # for some normal executions
    db = SqlSoup(engine)
    try:
        db.execute('ALTER TABLE MovieETA ADD lastCheck INTEGER')
        log.info('Added lastCheck to MovieETA table')
    except OperationalError:
        log.debug('Column lastCheck already added.')

    try:
        db.execute('ALTER TABLE MovieQueue ADD lastCheck INTEGER')
        log.info('Added lastCheck to MovieQueue table')
    except OperationalError:
        log.debug('Column lastCheck already added.')

    Session.add(DbVersion(4))
    Session.flush()
Developer ID: Amelandbor, Project: CouchPotato, Lines of code: 20, Source: db.py
Example 18: __init__
def __init__(self):
    # Create any tables that don't already exist
    self.createTables()

    self.db = SqlSoup(FLAGS.api_db_url)
    self.db_nets = self.db.networks
    self.db_ports = self.db.ports
    self.db_macs = self.db.macs
    self.db_bonds = self.db.bonds
    self.db_flowspace = self.db.flowspace
    self.db_net2slice = self.db.delegated_nets
Developer ID: zoomis, Project: ryu, Lines of code: 11, Source: api_db.py
Example 19: connection
def connection(self):
    engine = create_engine('postgres:' + '//' + USER + ':'
                           + PASSWORD + '@'
                           + HOST + ':'
                           + PORT + '/'
                           + DATABASE,
                           encoding='utf-8',
                           convert_unicode=True)
    engine.echo = True
    meta = MetaData(engine)
    self.pg_db_dest = SqlSoup(meta)
    self.pg_db_dest.schema = AZIENDA_DESTINAZIONE
    self.readFile()
Developer ID: fmarella, Project: pg3, Lines of code: 14, Source: importInventario.py
Example 20: connection
def connection(self):
    engine = create_engine('postgres:' + '//' + self.user + ':'
                           + self.password + '@'
                           + self.host + ':'
                           + self.port + '/'
                           + self.database,
                           encoding='utf-8',
                           convert_unicode=True)
    engine.echo = True
    meta = MetaData(engine)
    self.pg_db_dest = SqlSoup(meta)
    self.pg_db_dest.schema = self.azienda_destinazione
    self.readFile()
Developer ID: Alwnikrotikz, Project: promogest, Lines of code: 14, Source: importContacts.py
Note: The sqlalchemy.ext.sqlsoup.SqlSoup class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. For redistribution and use, please refer to the license of the corresponding project; do not reproduce without permission.