本文整理汇总了Python中sqlalchemy.func.sum函数的典型用法代码示例。如果您正苦于以下问题:Python sum函数的具体用法?Python sum怎么用?Python sum使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了sum函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: totalList
def totalList():
    """Return the all-time top-15 investors, cached in Redis for one hour.

    Each entry is a dict: {'id', 'username', 'total_amount', 'number'}.
    Cache layout: Redis list 'rank:total' of JSON blobs, TTL 3600 s.
    """
    if not redis.llen('rank:total'):
        # Cache miss: aggregate per-user investment count and total amount,
        # ranked by total invested, top 15 only.
        total_row = db_session.query(
            User.id,
            User.username,
            label('number', func.count(Investment.amount)),
            label('total_amount', func.sum(Investment.amount))
        ).filter(
            Investment.user_id == User.id,
        ).group_by(User.id).order_by(
            func.sum(Investment.amount).desc()
        ).limit(15).all()
        total_list = []
        for i in total_row:
            # Row -> dict keyed by the column labels selected above.
            i = dict(zip(i.keys(), i))
            data = {
                'id': i['id'],
                'username': i['username'],
                'total_amount': float(i['total_amount']),
                'number': i['number']
            }
            total_list.append(data)
            redis.rpush('rank:total', json.dumps(data))
        # NOTE(review): indentation was lost in extraction; expire is assumed
        # to run once after the loop — confirm against the original source.
        redis.expire('rank:total', 3600)
    else:
        # Cache hit: rebuild the list from the cached JSON entries.
        total_list = [json.loads(i.decode()) for i in redis.lrange('rank:total', 0, redis.llen('rank:total'))]
    return total_list
开发者ID:xxguo,项目名称:leopard,代码行数:30,代码来源:rank.py
示例2: leaders_weekly
def leaders_weekly(session, batch=20):
    """Return the top score earners for the past week.

    Produces {'new_cell': [...], 'new_wifi': [...]} where each list holds
    up to *batch* dicts of the form {'nickname': ..., 'num': ...}.
    """
    result = {'new_cell': [], 'new_wifi': []}
    today = util.utcnow().date()
    one_week = today - timedelta(7)

    score_rows = {}
    userids = set()
    for name in ('new_cell', 'new_wifi'):
        # Per-user score totals for this key over the last 7 days.
        rows = (
            session.query(Score.userid, func.sum(Score.value))
            .filter(Score.key == ScoreKey[name])
            .filter(Score.time >= one_week)
            .order_by(func.sum(Score.value).desc())
            .group_by(Score.userid)
            .limit(batch)
            .all()
        )
        score_rows[name] = rows
        userids.update(row[0] for row in rows)

    if not userids:  # pragma: no cover
        return result

    # Resolve user ids to nicknames in a single query.
    users = dict(
        session.query(User.id, User.nickname)
        .filter(User.id.in_(userids))
        .all()
    )

    for name, rows in score_rows.items():
        for userid, score_sum in rows:
            nickname = users.get(userid, 'anonymous')
            if len(nickname) > 24:  # pragma: no cover
                nickname = nickname[:24] + u'...'
            result[name].append(
                {'nickname': nickname, 'num': int(score_sum)})
    return result
开发者ID:awoland,项目名称:ichnaea,代码行数:32,代码来源:stats.py
示例3: get
def get(self, project_id, width=None, height=None):
    """Render an SVG line chart of summed coverage metrics per build.

    One data point per build: total covered lines and total covered
    classes, each linked to that build's log page.
    """
    config = graph_config(width, height)
    config.logarithmic = True
    svg = pygal.Line(config)
    # One row per build: summed line/class coverage across all coverages.
    builds = (
        self.db.query(
            Build.build_id,
            func.sum(Coverage.lines).label('lines'),
            func.sum(Coverage.cls).label('cls'))
        .select_from(Build)
        .join(Coverage, Build.coverages)
        .filter(Build.project_id == project_id)
        .group_by(Build.build_id)
        .order_by(Build.build_id)
        .all())
    # `or None` turns a zero metric into a gap in the chart.
    svg.add('Lines', [{
        'xlink': self.reverse_url('ProjectLog', project_id, b.build_id),
        'value': b.lines or None
    } for b in builds])
    svg.add('Classes', [{
        'xlink': self.reverse_url('ProjectLog', project_id, b.build_id),
        'value': b.cls or None
    } for b in builds])
    if width and height:
        # Fixed-size rendering gets explicit per-build x labels.
        svg.x_labels = ['#%d' % b.build_id for b in builds]
    # NOTE(review): extraction flattened indentation — the three statements
    # below are assumed to sit outside the `if`; confirm against original.
    svg.value_formatter = lambda x: '%d' % (x or 0)
    svg.title = 'Source metric'
    self.set_header("Content-Type", "image/svg+xml")
    self.write(svg.render())
开发者ID:Kozea,项目名称:ymci,代码行数:33,代码来源:routes.py
示例4: _get_last_year_statistics
def _get_last_year_statistics():
    """Aggregate the current user's flight stats for the last 365 days.

    Returns a dict with keys: flights, distance, duration, speed, and —
    only when there is at least one flight — average_distance and
    average_duration.
    """
    # Single aggregate row: flight count, total distance, total duration.
    query = db.session.query(func.count('*').label('flights'),
                             func.sum(Flight.olc_classic_distance).label('distance'),
                             func.sum(Flight.duration).label('duration')) \
        .filter(Flight.pilot == g.user) \
        .filter(Flight.date_local > (date.today() - timedelta(days=365))) \
        .first()
    # Defaults used when the user has no flights in the window.
    last_year_statistics = dict(flights=0,
                                distance=0,
                                duration=timedelta(0),
                                speed=0)
    if query and query.flights > 0:
        # Total duration as seconds, for the average-speed computation.
        duration_seconds = query.duration.days * 24 * 3600 + query.duration.seconds
        if duration_seconds > 0:
            last_year_statistics['speed'] = float(query.distance) / duration_seconds
        last_year_statistics['flights'] = query.flights
        last_year_statistics['distance'] = query.distance
        last_year_statistics['duration'] = query.duration
        last_year_statistics['average_distance'] = query.distance / query.flights
        last_year_statistics['average_duration'] = query.duration / query.flights
    return last_year_statistics
开发者ID:imclab,项目名称:skylines,代码行数:27,代码来源:user.py
示例5: top_spenders
def top_spenders(limit = 10, start_date = None, end_date = None):
    """Return the top users by total absolute 'drop' spending.

    :param limit: maximum number of rows to return.
    :param start_date: optional window start; only applied together with a
        valid ``end_date``.
    :param end_date: optional window end.
    :return: list of ``(username, total)`` rows, largest total first.

    An inverted or partial date range is ignored (same as the original
    behaviour, which silently fell back to the unfiltered query).
    """
    # Name the aggregate once so filtering/ordering share the expression.
    total_dropped = func.sum(func.abs(MoneyLog.amount))
    query = DBSession.query(
        MoneyLog.username,
        total_dropped
    ).filter(
        MoneyLog.reason == 'drop',
        MoneyLog.username != 'openhouse'
    )
    # Only constrain the time window when a complete, ordered range is given.
    if start_date and end_date and start_date <= end_date:
        query = query.filter(
            MoneyLog.time >= start_date,
            MoneyLog.time <= end_date
        )
    # BUGFIX: the original ordered by the string 'sum_1 desc', which depends
    # on SQLAlchemy's auto-generated label and was removed as an order_by
    # coercion in SQLAlchemy 1.4; order by the expression itself instead.
    return query.group_by(
        MoneyLog.username
    ).order_by(
        total_dropped.desc()
    ).limit(
        limit
    ).all()
开发者ID:JDrit,项目名称:drink_stats_v2,代码行数:31,代码来源:money_log.py
示例6: get_dop
def get_dop(cls, p_kode, p_tahun):
    """Return the SPPT (property-tax bill) detail query for one NOP and year.

    :param p_kode: formatted NOP (tax-object number) string, parsed into its
        fixed-length components via ``FixLength(NOP)``.
    :param p_tahun: tax year to match against ``thn_pajak_sppt``.
    :return: an unexecuted SQLAlchemy query; one row per bill with object,
        region, shared-object burden, and payment aggregates.
    """
    # Split the raw NOP into its fixed-width key parts (province, regency,
    # district, village, block, sequence, object type).
    pkey = FixLength(NOP)
    pkey.set_raw(p_kode)
    # The first column rebuilds the dotted/dashed NOP display string; the
    # COALESCE columns default shared-object burdens to 0; MAX/SUM collapse
    # the (possibly multiple) payment rows per bill.
    query = pbb_DBSession.query( func.concat(cls.kd_propinsi, '.').concat(cls.kd_dati2).concat('-').\
            concat(cls.kd_kecamatan).concat('.').concat(cls.kd_kelurahan).concat('-').\
            concat(cls.kd_blok).concat('.').concat(cls.no_urut).concat('-').\
            concat(cls.kd_jns_op).label('nop'),
            cls.thn_pajak_sppt, cls.luas_bumi_sppt, cls.njop_bumi_sppt,
            cls.luas_bng_sppt, cls.njop_bng_sppt, cls.nm_wp_sppt,
            cls.pbb_yg_harus_dibayar_sppt, cls.status_pembayaran_sppt,
            DatObjekPajak.jalan_op, DatObjekPajak.blok_kav_no_op,
            DatObjekPajak.rt_op, DatObjekPajak.rw_op,
            func.coalesce(SpptOpBersama.luas_bumi_beban_sppt,0).label('luas_bumi_beban'),
            func.coalesce(SpptOpBersama.luas_bng_beban_sppt,0).label('luas_bng_beban'),
            func.coalesce(SpptOpBersama.njop_bumi_beban_sppt,0).label('njop_bumi_beban'),
            func.coalesce(SpptOpBersama.njop_bng_beban_sppt,0).label('njop_bng_beban'),
            Kelurahan.nm_kelurahan, Kecamatan.nm_kecamatan, Dati2.nm_dati2,
            func.max(PembayaranSppt.tgl_pembayaran_sppt).label('tgl_bayar'),
            func.sum(func.coalesce(PembayaranSppt.jml_sppt_yg_dibayar,0)).label('jml_sppt_yg_dibayar'),
            func.sum(func.coalesce(PembayaranSppt.denda_sppt,0)).label('denda_sppt'),).\
        outerjoin(DatObjekPajak).\
        outerjoin(SpptOpBersama).\
        outerjoin(PembayaranSppt,and_(
            cls.kd_propinsi==PembayaranSppt.kd_propinsi,
            cls.kd_dati2==PembayaranSppt.kd_dati2,
            cls.kd_kecamatan==PembayaranSppt.kd_kecamatan,
            cls.kd_kelurahan==PembayaranSppt.kd_kelurahan,
            cls.kd_blok==PembayaranSppt.kd_blok,
            cls.no_urut==PembayaranSppt.no_urut,
            cls.kd_jns_op==PembayaranSppt.kd_jns_op,
            cls.thn_pajak_sppt==PembayaranSppt.thn_pajak_sppt
            )).\
        filter(cls.kd_propinsi == Kelurahan.kd_propinsi,
               cls.kd_dati2 == Kelurahan.kd_dati2,
               cls.kd_kecamatan == Kelurahan.kd_kecamatan,
               cls.kd_kelurahan == Kelurahan.kd_kelurahan,).\
        filter(cls.kd_propinsi == Kecamatan.kd_propinsi,
               cls.kd_dati2 == Kecamatan.kd_dati2,
               cls.kd_kecamatan == Kecamatan.kd_kecamatan,).\
        filter(cls.kd_propinsi == Dati2.kd_propinsi,
               cls.kd_dati2 == Dati2.kd_dati2,).\
        group_by(cls.kd_propinsi, cls.kd_dati2, cls.kd_kecamatan, cls.kd_kelurahan, cls.kd_blok,
                 cls.no_urut, cls.kd_jns_op, cls.thn_pajak_sppt, cls.luas_bumi_sppt, cls.njop_bumi_sppt,
                 cls.luas_bng_sppt, cls.njop_bng_sppt, cls.pbb_yg_harus_dibayar_sppt,
                 cls.status_pembayaran_sppt, DatObjekPajak.jalan_op, DatObjekPajak.blok_kav_no_op,
                 DatObjekPajak.rt_op, DatObjekPajak.rw_op,
                 SpptOpBersama.luas_bumi_beban_sppt,
                 SpptOpBersama.luas_bng_beban_sppt,
                 SpptOpBersama.njop_bumi_beban_sppt,
                 SpptOpBersama.njop_bng_beban_sppt,
                 Kelurahan.nm_kelurahan, Kecamatan.nm_kecamatan, Dati2.nm_dati2,)
    # Final filter: pin every NOP key component plus the requested tax year.
    return query.filter(
        cls.kd_propinsi == pkey['kd_propinsi'],
        cls.kd_dati2 == pkey['kd_dati2'],
        cls.kd_kecamatan == pkey['kd_kecamatan'],
        cls.kd_kelurahan == pkey['kd_kelurahan'],
        cls.kd_blok == pkey['kd_blok'],
        cls.no_urut == pkey['no_urut'],
        cls.kd_jns_op == pkey['kd_jns_op'],
        cls.thn_pajak_sppt==p_tahun)
开发者ID:aagusti,项目名称:opensipkd-rpc,代码行数:60,代码来源:pbb.py
示例7: _total
def _total(self, balance_uid):
    """Return the net total (incomes minus expenses) for one balance.

    :param balance_uid: identifier of the balance whose changes are summed.
    :return: incomes - expenses, or 0 if the subtraction fails (the error
        is logged, preserving the original best-effort behaviour).
    """
    model = request.environ["sqlalchemy.model"]
    db = request.environ["sqlalchemy.session"]

    def summed(is_income):
        # COALESCE guarantees 0 instead of NULL when no rows match,
        # so the subtraction below never sees None from an empty set.
        return db.execute(
            select(
                [func.coalesce(func.sum(model.BalanceChange.c.amount), 0)],
                and_(model.BalanceChange.c.balance_uid == balance_uid,
                     model.BalanceChange.c.is_income == is_income),
                from_obj=[model.balance_changes_table],
            )
        ).fetchone()[0]

    incomes = summed(True)
    expenses = summed(False)
    try:
        return incomes - expenses
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; keep the log-and-return-0 fallback otherwise.
        log.error("", exc_info=1)
        return 0
开发者ID:pawelniewie,项目名称:5groszy.pl,代码行数:25,代码来源:balances.py
示例8: GetInvoiceTotals
def GetInvoiceTotals(self, tenantId, param=None):
    """
    Calculates invoice totals, amounts, due, etc.,

    Returns a single aggregate row (Count, TotalAmount, PaidAmount) for the
    tenant's active orders, with an `Overdues` attribute attached holding
    the overdue count and outstanding amount; None when tenantId is falsy.
    Uses MySQL-specific IFNULL/IF SQL functions.
    """
    if tenantId:
        query = DBSession.query(
            func.count(Order.Id).label("Count"),
            func.ifnull(func.sum(Order.OrderAmount), 0).label("TotalAmount"),
            # Paid amount is capped at the order amount per order.
            func.ifnull(
                func.sum(func.IF(Order.PaidAmount >= Order.OrderAmount, Order.OrderAmount, Order.PaidAmount)), 0
            ).label("PaidAmount"),
        )
        query = query.filter(Order.TenantId == tenantId, Order.Status == True)
        if param:
            # Optional extra search constraints supplied by the caller.
            query = self.applySearchParam(query, param)
        totals = query.first()
        if totals:
            # Overdue = balance above a 0.5 tolerance and past the due date.
            oq = query.filter((Order.OrderAmount - Order.PaidAmount) > 0.5, Order.DueDate < func.now()).subquery()
            totals.Overdues = DBSession.query(
                oq.c.Count, (oq.c.TotalAmount - oq.c.PaidAmount).label("OverdueAmount")
            ).first()
        return totals
    return None
开发者ID:cackharot,项目名称:viper-pos,代码行数:28,代码来源:ReportService.py
示例9: _get_dep_statuses
def _get_dep_statuses(self, ti, session, dep_context):
    """Yield dependency statuses for *ti* according to its trigger rule.

    Short-circuits with a passing status when the task has no upstream
    tasks or uses the DUMMY trigger rule; otherwise aggregates upstream
    task-instance states in one query and delegates to
    ``_evaluate_trigger_rule``.
    """
    TI = airflow.models.TaskInstance
    TR = airflow.models.TriggerRule
    # Checking that all upstream dependencies have succeeded
    if not ti.task.upstream_list:
        yield self._passing_status(
            reason="The task instance did not have any upstream tasks.")
        # BUGFIX: `raise StopIteration` inside a generator is converted to
        # RuntimeError on Python 3.7+ (PEP 479); a plain `return` is the
        # correct way to end the generator early.
        return
    if ti.task.trigger_rule == TR.DUMMY:
        yield self._passing_status(reason="The task had a dummy trigger rule set.")
        return
    # TODO(unknown): this query becomes quite expensive with dags that have many
    # tasks. It should be refactored to let the task report to the dag run and get the
    # aggregates from there.
    qry = (
        session
        .query(
            # One SUM(CASE ...) per state of interest, COALESCEd to 0 so an
            # empty result set still unpacks cleanly below.
            func.coalesce(func.sum(
                case([(TI.state == State.SUCCESS, 1)], else_=0)), 0),
            func.coalesce(func.sum(
                case([(TI.state == State.EXCLUDED, 1)], else_=0)), 0),
            func.coalesce(func.sum(
                case([(TI.state == State.SKIPPED, 1)], else_=0)), 0),
            func.coalesce(func.sum(
                case([(TI.state == State.FAILED, 1)], else_=0)), 0),
            func.coalesce(func.sum(
                case([(TI.state == State.UPSTREAM_FAILED, 1)], else_=0)), 0),
            func.count(TI.task_id),
        )
        .filter(
            TI.dag_id == ti.dag_id,
            TI.task_id.in_(ti.task.upstream_task_ids),
            TI.execution_date == ti.execution_date,
            TI.state.in_([
                State.SUCCESS, State.FAILED, State.EXCLUDED,
                State.UPSTREAM_FAILED, State.SKIPPED]),
        )
    )
    successes, excluded, skipped, failed, upstream_failed, done = qry.first()
    # Add excluded tasks into successful tasks as they are equivalent for
    # dependency purposes. This is done in this way, not using the
    # state_for_dependents function, due to the constraints of SQLAlchemy
    # queries.
    successes = successes + excluded
    for dep_status in self._evaluate_trigger_rule(
            ti=ti,
            successes=successes,
            skipped=skipped,
            failed=failed,
            upstream_failed=upstream_failed,
            done=done,
            flag_upstream_failed=dep_context.flag_upstream_failed,
            session=session):
        yield dep_status
示例10: __init__
def __init__(self, context):
    """Query builder for account-item turnovers (expenses vs. revenue)."""
    ResourcesQueryBuilder.__init__(self, context)
    # Outgoing cashflows, summed per account item -> 'expenses' column.
    self._cashflows_from_sub = (
        DBSession.query(
            func.sum(Cashflow.sum).label('expenses'),
            Cashflow.account_item_id.label('account_item_id'),
        )
        .join(Subaccount, Cashflow.subaccount_from)
        .group_by(Cashflow.account_item_id)
    )
    # Incoming cashflows, summed per account item -> 'revenue' column.
    self._cashflows_to_sub = (
        DBSession.query(
            func.sum(Cashflow.sum).label('revenue'),
            Cashflow.account_item_id.label('account_item_id'),
        )
        .join(Subaccount, Cashflow.subaccount_to)
        .group_by(Cashflow.account_item_id)
    )
    # Selectable fields exposed to the base builder ('text'/'name' alias
    # the same column for UI widgets).
    self._fields = {
        'id': AccountItem.id,
        '_id': AccountItem.id,
        'name': AccountItem.name,
        'text': AccountItem.name,
        'parent_id': AccountItem.parent_id.label('parent_id')
    }
    self.build_query()
开发者ID:alishir,项目名称:tcr,代码行数:26,代码来源:turnovers.py
示例11: weekList
def weekList():
    """Return the top-15 investors of the past week, cached in Redis.

    Same shape as the all-time ranking: each entry is a dict with
    'id', 'username', 'total_amount', 'number'. Cache key 'rank:week',
    a Redis list of JSON blobs with a 3600 s TTL.
    """
    if not redis.llen('rank:week'):
        # Cache miss: aggregate investments dated within the last 7 days.
        rows = db_session.query(
            User.id,
            User.username,
            label('number', func.count(Investment.amount)),
            label('total_amount', func.sum(Investment.amount))
        ).filter(
            Investment.user_id == User.id,
            cast(Investment.added_at, Date) <= datetime.datetime.today(),
            cast(Investment.added_at, Date) >= datetime.datetime.today() -
            datetime.timedelta(weeks=1)
        ).group_by(User.id).order_by(
            func.sum(Investment.amount).desc()
        ).limit(15).all()
        rank_list = []
        for i in rows:
            # Row -> dict keyed by the column labels selected above.
            i = dict(zip(i.keys(), i))
            data = {
                'id': i['id'],
                'username': i['username'],
                'total_amount': float(i['total_amount']),
                'number': i['number']
            }
            rank_list.append(data)
            redis.rpush('rank:week', json.dumps(data))
        # NOTE(review): indentation was lost in extraction; expire is assumed
        # to run once after the loop — confirm against the original source.
        redis.expire('rank:week', 3600)
    else:
        # Cache hit: rebuild the list from the cached JSON entries.
        rank_list = [json.loads(i.decode()) for i in redis.lrange('rank:week', 0, redis.llen('rank:week'))]
    return rank_list
开发者ID:xxguo,项目名称:leopard,代码行数:33,代码来源:rank.py
示例12: get_time_count
def get_time_count(topic, start_ts, end_ts, unit=MinInterval):
    # Counts of each sentiment/message type (mtype) over time, bucketed by
    # `unit`-sized intervals aligned to multiples of `unit`.
    # Returns {bucket_end_ts: {mtype: summed_dcount}}.
    # NOTE: Python 2 code (`long`, print statement elsewhere in this file).
    count = {}
    if (end_ts - start_ts < unit):
        # Window smaller than one bucket: query the single bucket whose
        # upper bound covers end_ts.
        upbound = long(math.ceil(end_ts / (unit * 1.0)) * unit)
        items = db.session.query(PropagateCount.mtype, func.sum(PropagateCount.dcount)).filter(PropagateCount.end == upbound,
            PropagateCount.topic == topic).group_by(PropagateCount.mtype).all()
        count[end_ts] = {}
        for item in items:
            if item[0] in count[end_ts]:
                count[end_ts][item[0]] += item[1]
            else:
                count[end_ts][item[0]] = item[1]
    else:
        upbound = long(math.ceil(end_ts / (unit * 1.0)) * unit)
        lowbound = long((start_ts / unit) * unit)
        interval = (upbound - lowbound) / unit
        for i in range(interval, 0, -1):
            begin_ts = upbound - unit * i
            end_ts = begin_ts + unit
            try:
                items = db.session.query(PropagateCount.mtype, func.sum(PropagateCount.dcount)).filter(PropagateCount.end > begin_ts,
                    PropagateCount.end <= end_ts,
                    PropagateCount.topic == topic).group_by(PropagateCount.mtype).all()
            except Exception:
                # BUGFIX: the original bare `except:` left `items` holding the
                # PREVIOUS iteration's rows (or unbound on the first pass),
                # silently double-counting them into this bucket.
                db.session.rollback()
                items = []
            count[end_ts] = {}
            for item in items:
                if item[0] in count[end_ts]:
                    count[end_ts][item[0]] += item[1]
                else:
                    count[end_ts][item[0]] = item[1]
    return count
开发者ID:SwoJa,项目名称:ruman,代码行数:33,代码来源:utils.py
示例13: at_snapshot
def at_snapshot(snapshot):
    """Summarise processor usage for one cluster snapshot.

    Returns (rows, iso_time) where rows is a list of (proc_count, user)
    pairs for running jobs, plus one ('free') pseudo-entry for idle
    processors.
    """
    # Processors in use, grouped per user (running jobs only).
    usage = (db.query(func.sum(Job.processors), Job.user)
             .group_by(Job.user)
             .filter_by(cluster=cluster, snapshot=snapshot, status='R')
             .all())
    # Idle capacity across free nodes, appended as pseudo-user 'free'.
    free_row = (db.query(func.sum(Node.n_procs))
                .filter_by(cluster=cluster, snapshot=snapshot, state='free')
                .first())
    usage.append((free_row[0], u'free'))
    return usage, snapshot.time.isoformat()
开发者ID:rmcgibbo,项目名称:webstat,代码行数:7,代码来源:analytics.py
示例14: main
def main(argv=None):
    """Plot annual residential growth (% of cumulative built area),
    comparing all of Austin against the East Side region, 1980-2010.
    """
    fig = plt.figure(figsize=(3,2.5))
    #fig.suptitle("Residential Growth by Year", fontsize=18, weight='bold')
    s = Session()
    # Context 3 holds the East Side region geometry.
    region = s.query(Context).get(3)
    # Restrict to residential improvement types only.
    q = s.query(TCADSegment).join(TCADSegment.improvement).filter(TCADImprovement.description.in_(['FOURPLEX','APARTMENT 100+','CONDO (STACKED)','APARTMENT 50-100', '1 FAM DWELLING', '2 FAM DWELLING', '1/2 DUPLEX', 'APARTMENT 5-25', 'APARTMENT 26-49']))
    X = range(1980, 2010)
    # Per-year built area vs. cumulative area through that year, city-wide.
    All = array( [ q.filter( TCADSegment.year_built == x ).value(func.sum(TCADSegment.area)) for x in X ], dtype=float )
    All_norm = array( [ q.filter( TCADSegment.year_built <= x ).value(func.sum(TCADSegment.area)) for x in X ], dtype=float )
    # Same pair, restricted to parcels inside the East Side geometry.
    East = array( [ q.join(TCADImprovement.parcel).filter(TCAD_2010.the_geom.within(region.geom)).filter(TCADSegment.year_built == x).value(func.sum(TCADSegment.area)) for x in X ], dtype=float )
    East_norm = array( [ q.join(TCADImprovement.parcel).filter(TCAD_2010.the_geom.within(region.geom)).filter(TCADSegment.year_built <= x).value(func.sum(TCADSegment.area)) for x in X ], dtype=float )
    ax = plt.subplot(111)
    # Growth rate = year's new area as a percentage of cumulative stock.
    p2=ax.plot(X,100 * All/All_norm, color='.75')
    p1=ax.plot(X,100 * East/East_norm, color='k', lw=2)
    ax.set_ylabel("Growth in Square Feet (%)")
    ax.set_xlabel("Year")
    ax.grid(True)
    ax.axis([1980,2010,0,5])
    ax.legend([p2,p1],['All Austin','East Side'], loc='upper left')
    plt.subplots_adjust(right=.93, top=.95, bottom=.15, left=.15)
    show()
示例15: create_summary_accs
def create_summary_accs():
    """Roll account rows up to main-account (ana_hesap) level.

    Sums py1/py2/cy per main account and inserts one 3-digit summary row
    per group, taking the display name and lead code either from an
    existing unmapped account row or from the Lead table.
    """
    query = db.session.query(db.Hesaplar.ana_hesap,
                             db.Hesaplar.lead_code,
                             func.sum(db.Hesaplar.py1).label('py1'),
                             func.sum(db.Hesaplar.py2).label('py2'),
                             func.sum(db.Hesaplar.cy).label('cy'),
                             ).group_by('ana_hesap')
    for k in query.all():
        unmapped = None
        if k.lead_code == 'Unmapped':
            # Prefer an existing 3-digit account as the naming source, then
            # fall back to any row under the same main account.
            unmapped = db.session.query(db.Hesaplar).filter_by(len=3,
                ana_hesap=k.ana_hesap).first() or db.session.query(
                db.Hesaplar).filter_by(ana_hesap=k.ana_hesap).first()
        source = db.session.query(db.Lead).filter_by(account=k.ana_hesap).first()
        main_source = unmapped or source
        name = main_source.name
        db.session.add(
            db.Hesaplar(number=k.ana_hesap, ana_hesap=k.ana_hesap, name=name, lead_code=main_source.lead_code, cy=k.cy,
                        py1=k.py1, py2=k.py2, len=3))
    # NOTE(review): indentation was lost in extraction; the commit is assumed
    # to run once after the loop — confirm against the original source.
    db.session.commit()
开发者ID:xybydy,项目名称:kirilim,代码行数:26,代码来源:reader.py
示例16: member_payments
def member_payments():
    """Build a query of (id, nick, avg-per-month payment) for active members.

    The third column divides each member's total paid amount by the total
    months those payments covered. Ordered by member id.
    """
    per_month = func.sum(Payment.amount) / func.sum(Payment.months)
    return (Session.query(Member.id, Member.nick, per_month)
            .join((Payment, Member.id == Payment.member_id))
            .filter(Member.active == True)
            .group_by(Member.id)
            .order_by(Member.id))
开发者ID:MerlijnWajer,项目名称:TInance,代码行数:7,代码来源:stats.py
示例17: test_proportion_overlap
def test_proportion_overlap(loader):
    """Verify spatialtoolz.proportion_overlap: adds a column of per-parcel
    water-overlap fractions in [0, 1], whose implied overlap area never
    exceeds the total water area."""
    # Calculate proportion of each parcel overlapped by water.
    parcels = loader.tables.sample.heather_farms
    water = loader.tables.sample.hf_water
    assert not hasattr(parcels, 'proportion_water')
    spatialtoolz.proportion_overlap(parcels, water, 'proportion_water')
    assert hasattr(parcels, 'proportion_water')
    # Build DataFrame from columns of parcels table.
    columns = [parcels.parcel_id, parcels.geom.ST_Area(),
               parcels.proportion_water]
    parcels_df = db_to_df(columns, index_col='parcel_id')
    # Assert that proportion overlap values are between 0 and 1.
    assert parcels_df.proportion_water.dtype == float
    assert not (parcels_df.proportion_water < 0).any()
    assert not (parcels_df.proportion_water > 1).any()
    # Assert that sum of overlapped parcel area is <= total water area.
    with loader.database.session() as sess:
        overlapped_area = sess.query(
            func.sum(parcels.proportion_water * parcels.geom.ST_Area())
        ).scalar()
        water_area = sess.query(func.sum(water.geom.ST_Area())).scalar()
    assert overlapped_area <= water_area
开发者ID:UDST,项目名称:spandex,代码行数:25,代码来源:test_spatialtoolz.py
示例18: get_dag_duration_info
def get_dag_duration_info():
    '''get duration of currently running DagRuns
    :return dag_info
    '''
    driver = Session.bind.driver
    # Each backend needs its own SQL for "seconds since start_date".
    if driver == 'pysqlite':
        # SQLite has no datetime diff; convert via julian day numbers.
        duration = func.sum(
            (func.julianday(func.current_timestamp()) - func.julianday(DagRun.start_date)) * 86400.0
        )
    elif driver == 'mysqldb':
        duration = func.sum(func.timestampdiff(text('second'), DagRun.start_date, func.now()))
    else:
        # Backends with native interval arithmetic (e.g. PostgreSQL).
        duration = func.sum(func.now() - DagRun.start_date)
    with session_scope(Session) as session:
        running = session.query(
            DagRun.dag_id,
            DagRun.run_id,
            duration.label('duration')
        ).group_by(
            DagRun.dag_id,
            DagRun.run_id
        ).filter(
            DagRun.state == State.RUNNING
        )
        return running.all()
开发者ID:slash-cyberpunk,项目名称:airflow-exporter,代码行数:25,代码来源:prometheus_exporter.py
示例19: load_globalcounts
def load_globalcounts(list_of_wordcounts):
"""
Adds wordcounts for all unique words. There should only be one row per unique word.
"""
# i = 0
for localcount_dict in list_of_wordcounts:
# if i < 5:
for word, count in localcount_dict.iteritems():
item = session.query(GlobalCount).filter(GlobalCount.term == word).first()
if item:
print "%r is already in globalcounts. Updating count..." % word
# update the global count for this word, because we have added new songs with more occurrences of this word
q = session.query(LocalCount.term, func.sum(LocalCount.count))
q = q.group_by(LocalCount.term)
q = q.filter(LocalCount.term == word)
results = q.all()
# print "Current count for %r is %d" % (item.term, item.count)
item.count = results[0][1]
print "Updating %r's count to %d" % (item.term, item.count)
session.commit()
else:
print "%r not in globalcounts table, creating new row" % word
qq = session.query(LocalCount.term, func.sum(LocalCount.count))
qq = qq.group_by(LocalCount.term)
qq = qq.filter(LocalCount.term == word)
resultsresults = qq.all()
countcount = resultsresults[0][1]
new_row = GlobalCount(term = word, count = countcount)
session.add(new_row)
# you must commit before you query the same word/item again!
session.commit()
开发者ID:magshi,项目名称:golem,代码行数:35,代码来源:seed.py
示例20: getSetStatistics
def getSetStatistics(id):
    """Aggregate per-player match/game statistics for one card Set.

    Sums match and game wins/losses across 'Normal' tournaments in the
    set, counts tournament wins (finished tournaments only), and returns
    the list of stat dicts ranked by addPositions().
    """
    # One row per (player entity, set): summed match/game wins and losses.
    results = db.session.query(Entity, Set, func.sum(Statistics.match_wins).label("total_match_wins"),
                               func.sum(Statistics.match_losses).label("total_match_losses"),
                               func.sum(Statistics.game_wins).label("total_game_wins"),
                               func.sum(Statistics.game_losses).label("total_game_losses")).join(Statistics).join(Tournament).join(TournamentType).join(Set).filter(TournamentType.description == 'Normal').filter(Set.id == id).group_by(Entity.id, Set.id).all()
    # First-place finishes in finished 'Normal' tournaments of this set.
    tournaments = db.session.query(Entity, Tournament.id).join(Statistics).join(Tournament).join(TournamentType).join(Set).filter(Statistics.position == 1).filter(Statistics.matches_unfinished == 0).filter(TournamentType.description == 'Normal').filter(Set.id == id).all()
    statistics = []
    for row in results:
        wins = 0
        for tournament in tournaments:
            # Count only tournaments with no unfinished matches anywhere.
            if row.Entity == tournament[0] and not unfinishedMatchesInTournament(tournament[1]):
                wins += 1
        # NOTE(review): under Python 2, these `/` divisions truncate when the
        # summed columns are integers — confirm true division is intended.
        rowDictionary = {'total_match_wins':row.total_match_wins,
                         'total_match_losses':row.total_match_losses,
                         'total_game_wins':row.total_game_wins,
                         'total_game_losses':row.total_game_losses,
                         'match_win_percentage':row.total_match_wins/(row.total_match_wins + row.total_match_losses) * 100 if (row.total_match_wins + row.total_match_losses) > 0 else 0.0,
                         'game_win_percentage':row.total_game_wins/(row.total_game_wins + row.total_game_losses) * 100 if (row.total_game_wins + row.total_game_losses) > 0 else 0.0,
                         'total_matches_played':row.total_match_wins + row.total_match_losses,
                         'player':row.Entity.participants[0].player.name,
                         'set':row.Set.name,
                         'tournament_wins':wins}
        statistics.append(rowDictionary)
    return addPositions(statistics)
注:本文中的sqlalchemy.func.sum函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论