This article collects typical usage examples of the Dataset class from nomenklatura.model in Python. If you are wondering what the Dataset class does, how to use it, or what real-world usage looks like, the hand-picked examples below should help.
Twenty code examples of the Dataset class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
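Before diving in, two notes. First, the snippets come from a Python 2 code base (tuple-unpacking lambdas, print statements, the old except ..., e: syntax), so they will not run unmodified under Python 3. Second, a handful of Dataset class methods recur throughout: find() and by_name() to resolve a dataset by name, all() to enumerate datasets, and create()/from_form() to build new ones. The following minimal sketch of the lookup pattern is not taken from the project; it assumes a configured nomenklatura application and database, and it assumes by_name() returns None when no dataset matches.

from nomenklatura.model import Dataset

def list_dataset_names():
    # Enumerate every dataset known to the application
    # (assumes an active application/database context).
    return [dataset.name for dataset in Dataset.all()]

def get_dataset_label(name):
    # Assumption: by_name() returns None when no dataset with this name exists.
    dataset = Dataset.by_name(name)
    return dataset.label if dataset is not None else None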
Example 1: import_upload

def import_upload(dataset_name, id, account_id,
                  entity_col, alias_col):
    dataset = Dataset.find(dataset_name)
    account = Account.by_id(account_id)
    metadata, row_set = parse_upload(dataset, id)
    headers = detect_headers(row_set)
    for row in row_set:
        data = dict([(c.column, c.value) for c in row])
        entity = data.pop(entity_col) if entity_col else None
        alias = data.pop(alias_col) if alias_col else None
        if alias_col and alias is not None and len(alias) and alias != entity:
            d = {'name': alias, 'data': data}
            alias_obj = Alias.lookup(dataset, d, account,
                                     match_entity=False)
            data = {}
        if entity_col and entity is not None and len(entity):
            d = {'name': entity, 'data': data}
            entity_obj = Entity.by_name(dataset, entity)
            if entity_obj is None:
                entity_obj = Entity.create(dataset, d, account)
            entity_obj.data = data
        if alias_col and entity_col:
            alias_obj.match(dataset, {'choice': entity_obj.id}, account)
    db.session.commit()
    flush_cache(dataset)

Developer ID: imclab, Project: nomenklatura, Lines of code: 25, Source file: importer.py
Example 2: view

def view(dataset, link):
    dataset = Dataset.find(dataset)
    link = Link.find(dataset, link)
    format = response_format()
    if format == 'json':
        return jsonify(link)
    return "Not implemented!"

Developer ID: csenger, Project: nomenklatura, Lines of code: 7, Source file: link.py
Example 3: match

def match(dataset, link, random=False):
    dataset = Dataset.find(dataset)
    authz.require(authz.dataset_edit(dataset))
    link = Link.find(dataset, link)
    random = random or request.args.get('random') == 'True'
    choices = match_op(link.key, dataset,
                       query=request.args.get('query'))
    pager = Pager(choices, '.match',
                  dataset=dataset.name, link=link.id,
                  limit=10)
    # HACK: Fetch only the values on the selected page.
    value_objs = Value.id_map(dataset, map(lambda (c, v, s): v,
        pager.query[pager.offset:pager.offset + pager.limit]))
    for i, (c, v, s) in enumerate(pager.query):
        if v in value_objs:
            pager.query[i] = (c, value_objs.get(v), s)
    html = render_template('link/match.html',
                           dataset=dataset, link=link, choices=pager,
                           random=random)
    choice = 'INVALID' if link.is_invalid else link.value_id
    if len(choices) and choice is None:
        c, v, s = choices[0]
        choice = 'INVALID' if s <= 50 else v
    return htmlfill.render(html, force_defaults=False,
                           defaults={'choice': choice,
                                     'value': link.key,
                                     'query': request.args.get('query', ''),
                                     'random': random})

Developer ID: okfn, Project: nomenklatura, Lines of code: 30, Source file: link.py
Example 4: edit

def edit(dataset):
    dataset = Dataset.find(dataset)
    authz.require(authz.dataset_manage(dataset))
    html = render_template('dataset/edit.html',
                           dataset=dataset,
                           algorithms=get_algorithms())
    return htmlfill.render(html, defaults=dataset.as_dict())

Developer ID: okfn, Project: nomenklatura, Lines of code: 7, Source file: dataset.py
Example 5: match

def match(dataset, alias, random=False):
    dataset = Dataset.find(dataset)
    authz.require(authz.dataset_edit(dataset))
    alias = Alias.find(dataset, alias)
    random = random or request.args.get('random') == 'True'
    choices = match_op(alias.name, dataset,
                       query=request.args.get('query'))
    pager = Pager(choices, '.match',
                  dataset=dataset.name, alias=alias.id,
                  limit=10)
    # HACK: Fetch only the entities on the selected page.
    entities = Entity.id_map(dataset, map(lambda (c, e, s): e,
        pager.query[pager.offset:pager.offset + pager.limit]))
    for i, (c, e, s) in enumerate(pager.query):
        if e in entities:
            pager.query[i] = (c, entities.get(e), s)
    html = render_template('alias/match.html',
                           dataset=dataset, alias=alias, choices=pager,
                           random=random)
    choice = 'INVALID' if alias.is_invalid else alias.entity_id
    if len(choices) and choice is None:
        c, e, s = choices[0]
        choice = 'INVALID' if s <= 50 else e.id
    return htmlfill.render(html, force_defaults=False,
                           defaults={'choice': choice,
                                     'name': alias.name,
                                     'query': request.args.get('query', ''),
                                     'random': random})

Developer ID: OpenRefine, Project: nomenklatura, Lines of code: 30, Source file: alias.py
Example 6: reconcile_index

def reconcile_index(dataset):
    domain = url_for('index', _external=True).strip('/')
    urlp = domain + '{{id}}'
    meta = {
        'name': 'nomenklatura',
        'identifierSpace': 'http://rdf.freebase.com/ns/type.object.id',
        'schemaSpace': 'http://rdf.freebase.com/ns/type.object.id',
        'view': {'url': urlp},
        'preview': {
            'url': urlp + '?preview=true',
            'width': 600,
            'height': 300
        }
    }
    if dataset is not None:
        meta['name'] = dataset.label
        meta['suggest'] = {
            'entity': {
                'service_url': domain,
                'service_path': '/' + dataset.name + '/suggest',
                'flyout_service_path': '/flyout'
            }
        }
        meta['defaultTypes'] = [{'name': dataset.label, 'id': '/' + dataset.name}]
    else:
        meta['defaultTypes'] = [{'name': d.label, 'id': '/' + d.name} for d in Dataset.all()]
    return jsonify(meta)

Developer ID: OpenRefine, Project: nomenklatura, Lines of code: 27, Source file: reconcile.py
Example 7: view

def view(dataset, value):
    dataset = Dataset.find(dataset)
    value = Value.find(dataset, value)
    format = response_format()
    if format == 'json':
        return jsonify(value)
    query = request.args.get('query', '').strip().lower()
    choices = match_op(value.value, dataset)
    choices = filter(lambda (c, v, s): v != value.id, choices)
    if len(query):
        choices = filter(lambda (c, v, s): query in Value.find(dataset, v).value.lower(),
                         choices)
    # THIS is very inefficient - rather do this
    # differently
    pager = Pager(choices, '.view', dataset=dataset.name,
                  value=value.id, limit=10)
    # HACK: Fetch only the values on the selected page.
    value_objs = Value.id_map(dataset, map(lambda (c, v, s): v,
        pager.query[pager.offset:pager.offset + pager.limit]))
    for i, (c, v, s) in enumerate(pager.query):
        if v in value_objs:
            pager.query[i] = (c, value_objs.get(v), s)
    return render_template('value/view.html', dataset=dataset,
                           value=value, values=pager, query=query)

Developer ID: okfn, Project: nomenklatura, Lines of code: 26, Source file: value.py
Example 8: review

def review(dataset):
    entities = Entity.all()
    dataset = Dataset.find(dataset)
    entities = entities.filter_by(dataset=dataset)
    entities = entities.filter(Entity.reviewed == False)
    entities = entities.offset(randint(0, entities.count() - 1))
    return jsonify(entities.first())

Developer ID: rajasoun, Project: nomenklatura, Lines of code: 7, Source file: matching.py
Example 9: lookup

def lookup(dataset):
    dataset = Dataset.find(dataset)
    readonly = validators.StringBool(if_empty=False, if_missing=False)\
        .to_python(request.args.get('readonly'))
    readonly = readonly if authz.logged_in() else True
    data = request_content()
    if response_format() != 'json':
        return Response("Not implemented!", status=400)
    try:
        alias = Alias.lookup(dataset, data, request.account,
                             readonly=readonly)
        if alias is None:
            return jsonify({
                'is_matched': False,
                'entity': None,
                'name': data.get('name'),
                'dataset': dataset.name
            }, status=404)
        if isinstance(alias, Entity):
            return jsonify({
                'is_matched': True,
                'entity': alias,
                'name': data.get('name'),
                'dataset': dataset.name
            }, status=200)
        db.session.commit()
        status = 200 if alias.is_matched else 404
        status = 418 if alias.is_invalid else status
        return jsonify(alias, status=status)
    except Invalid, inv:
        return handle_invalid(inv, index, data=data,
                              args=[dataset.name])

Developer ID: OpenRefine, Project: nomenklatura, Lines of code: 35, Source file: alias.py
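For readers who want to call this endpoint rather than read its implementation, here is a hedged client-side sketch. It is not part of nomenklatura: lookup_url is a placeholder (the route that maps to lookup() is defined elsewhere in the application), and it assumes request_content() accepts a JSON body with a 'name' field. The status codes mirror the view above: 200 for a matched alias, 404 for an unmatched one, 418 for one marked invalid.

import requests  # third-party HTTP client, not part of nomenklatura

def lookup_name(lookup_url, name):
    # lookup_url is a placeholder for wherever lookup() is routed in a deployment.
    # Assumption: the endpoint accepts a JSON body and honours the Accept header.
    resp = requests.post(lookup_url, json={'name': name},
                         headers={'Accept': 'application/json'})
    if resp.status_code in (200, 404, 418):
        # 200 = matched, 404 = unmatched, 418 = marked invalid (see the view above).
        return resp.status_code, resp.json()
    resp.raise_for_status()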
Example 10: suggest

def suggest(dataset):
    """
    Suggest API, emulates Google Refine API. See:
    http://code.google.com/p/google-refine/wiki/SuggestApi
    """
    dataset = Dataset.by_name(dataset)
    entities = Entity.all().filter(Entity.invalid != True)
    query = request.args.get('prefix', '').strip()
    entities = entities.filter(Entity.name.ilike('%s%%' % query))
    entities = entities.offset(get_offset(field='start'))
    entities = entities.limit(get_limit(default=20))
    matches = []
    for entity in entities:
        matches.append({
            'name': entity.name,
            'n:type': {
                'id': '/' + dataset.name,
                'name': dataset.label
            },
            'id': entity.id
        })
    return jsonify({
        "code": "/api/status/ok",
        "status": "200 OK",
        "prefix": query,
        "result": matches
    })

Developer ID: IdahoInstitute, Project: nomenklatura, Lines of code: 28, Source file: reconcile.py
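A small client-side sketch of calling this Suggest endpoint follows. It is not project code: base_url and dataset_name are placeholders, the '/<dataset>/suggest' path is taken from the service_path advertised by reconcile_index() in Example 6, and only the 'prefix' parameter read by the view above is used.

import requests  # third-party HTTP client, not part of nomenklatura

def suggest_entities(base_url, dataset_name, prefix):
    # Path taken from the 'suggest' service_path advertised in Example 6;
    # base_url and dataset_name are placeholders for a real deployment.
    url = '%s/%s/suggest' % (base_url.rstrip('/'), dataset_name)
    resp = requests.get(url, params={'prefix': prefix})
    resp.raise_for_status()
    # The view returns Freebase-style suggestions under the 'result' key.
    return [match['name'] for match in resp.json().get('result', [])]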
Example 11: reconcile

def reconcile(dataset=None):
    """
    Reconciliation API, emulates Google Refine API. See:
    http://code.google.com/p/google-refine/wiki/ReconciliationServiceApi
    """
    if dataset is not None:
        dataset = Dataset.by_name(dataset)
    # TODO: Add proper support for types and namespacing.
    data = request.args.copy()
    data.update(request.form.copy())
    if 'query' in data:
        # single
        q = data.get('query')
        if q.startswith('{'):
            try:
                q = json.loads(q)
            except ValueError:
                raise BadRequest()
        else:
            q = data
        return jsonify(reconcile_op(dataset, q))
    elif 'queries' in data:
        # multiple requests in one query
        qs = data.get('queries')
        try:
            qs = json.loads(qs)
        except ValueError:
            raise BadRequest()
        queries = {}
        for k, q in qs.items():
            queries[k] = reconcile_op(dataset, q)
        return jsonify(queries)
    else:
        return reconcile_index(dataset)

Developer ID: OpenRefine, Project: nomenklatura, Lines of code: 35, Source file: reconcile.py
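The 'queries' branch above accepts a batch of reconciliation queries encoded as a single JSON object, in the style described by the linked Google Refine specification. The sketch below shows what such a client call could look like; it is not project code, and endpoint_url is a placeholder for wherever reconcile() is routed in a deployment.

import json
import requests  # third-party HTTP client, not part of nomenklatura

def reconcile_names(endpoint_url, names):
    # Build a batch payload: one reconciliation query per name, keyed q0, q1, ...
    queries = dict(('q%d' % i, {'query': name}) for i, name in enumerate(names))
    # The view reads 'queries' from request.form, so a form-encoded POST works.
    resp = requests.post(endpoint_url, data={'queries': json.dumps(queries)})
    resp.raise_for_status()
    return resp.json()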
Example 12: create

def create():
    data = request_data()
    dataset = Dataset.from_form(data)
    authz.require(authz.dataset_edit(dataset))
    entity = Entity.create(dataset, data, request.account)
    db.session.commit()
    return redirect(url_for('.view', id=entity.id))

Developer ID: IdahoInstitute, Project: nomenklatura, Lines of code: 7, Source file: entities.py
Example 13: view

def view(dataset, entity):
    dataset = Dataset.find(dataset)
    entity = Entity.find(dataset, entity)
    print entity.data
    format = response_format()
    if format == 'json':
        return jsonify(entity)
    query = request.args.get('query', '').strip().lower()
    choices = match_op(entity.name, dataset)
    choices = filter(lambda (c, e, s): e != entity.id, choices)
    if len(query):
        choices = filter(lambda (c, e, s): query in Entity.find(dataset, e).name.lower(),
                         choices)
    # THIS is very inefficient - rather do this
    # differently
    pager = Pager(choices, '.view', dataset=dataset.name,
                  entity=entity.id, limit=10)
    # HACK: Fetch only the entities on the selected page.
    entities = Entity.id_map(dataset, map(lambda (c, v, s): v,
        pager.query[pager.offset:pager.offset + pager.limit]))
    for i, (c, e, s) in enumerate(pager.query):
        if e in entities:
            pager.query[i] = (c, entities.get(e), s)
    return render_template('entity/view.html', dataset=dataset,
                           entity=entity, entities=pager, query=query)

Developer ID: OpenRefine, Project: nomenklatura, Lines of code: 27, Source file: entity.py
Example 14: import_upload

def import_upload(dataset_name, sig, account_id,
                  value_col, link_col):
    dataset = Dataset.find(dataset_name)
    account = Account.by_id(account_id)
    metadata, row_set = parse_upload(dataset, sig)
    headers = detect_headers(row_set)
    for row in row_set:
        data = dict([(c.column, c.value) for c in row])
        value = data.pop(value_col) if value_col else None
        link = data.pop(link_col) if link_col else None
        if link_col:
            d = {'key': link, 'data': data}
            link_obj = Link.lookup(dataset, d, account,
                                   match_value=False)
            data = {}
        if value_col:
            d = {'value': value, 'data': data}
            value_obj = Value.by_value(dataset, value)
            if value_obj is None:
                value_obj = Value.create(dataset,
                                         d, account)
            value_obj.data = data
        if link_col and value_col:
            link_obj.match(dataset, {'choice': value_obj.id},
                           account)
    db.session.commit()

Developer ID: okfn, Project: nomenklatura, Lines of code: 26, Source file: importer.py
Example 15: set_template_globals

def set_template_globals():
    return {
        "datasets": Dataset.all(),
        "authz": authz,
        "avatar_url": session.get("avatar_url", ""),
        "logged_in": request.account is not None,
        "login": request.account.login if request.account else None,
    }

Developer ID: rgrp, Project: nomenklatura, Lines of code: 8, Source file: web.py
Example 16: set_template_globals

def set_template_globals():
    return {
        'datasets': Dataset.all(),
        'authz': authz,
        'avatar_url': session.get('avatar_url', ''),
        'logged_in': request.account is not None,
        'login': request.account.login if request.account else None
    }

Developer ID: gabelula, Project: nomenklatura, Lines of code: 8, Source file: __init__.py
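Examples 15 and 16 both build the dictionary of values exposed to every template. As a hedged aside, a function like this is normally wired up as a Flask context processor; the app object below is an assumption, not code from the project.

# Assumption: 'app' is the project's Flask application object.
# Registering the function makes its return values available in all templates.
app.context_processor(set_template_globals)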
Example 17: match

def match():
    dataset_arg = request.args.get('dataset')
    dataset = Dataset.find(dataset_arg)
    matches = find_matches(dataset,
                           request.args.get('name'),
                           filter=request.args.get('filter'),
                           exclude=arg_int('exclude'))
    return query_pager(matches)

Developer ID: IdahoInstitute, Project: nomenklatura, Lines of code: 8, Source file: matching.py
Example 18: match

def match():
    dataset_arg = request.args.get('dataset')
    dataset = Dataset.find(dataset_arg)
    matches = find_matches(dataset,
                           request.args.get('name'),
                           filter=request.args.get('filter'),
                           exclude=arg_int('exclude'))
    pager = Pager(matches)
    return jsonify(pager.to_dict())

Developer ID: adamchainz, Project: nomenklatura, Lines of code: 9, Source file: matching.py
Example 19: create

def create():
    authz.require(authz.dataset_create())
    data = request_content()
    try:
        dataset = Dataset.create(data, request.account)
        db.session.commit()
        return redirect(url_for('.view', dataset=dataset.name))
    except Invalid, inv:
        return handle_invalid(inv, new, data=data)

Developer ID: okfn, Project: nomenklatura, Lines of code: 9, Source file: dataset.py
Example 20: upload

def upload(dataset):
    dataset = Dataset.find(dataset)
    authz.require(authz.dataset_edit(dataset))
    file_ = request.files.get('file')
    if not file_ or not file_.filename:
        inv = Invalid("No file.", None, None,
                      error_dict={'file': "You need to upload a file"})
        raise inv
    upload = upload_file(dataset, file_, request.account)
    return redirect(url_for('.map', dataset=dataset.name, id=upload.id))

Developer ID: gabelula, Project: nomenklatura, Lines of code: 10, Source file: upload.py
Note: The nomenklatura.model.Dataset examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their developers; copyright of the source code remains with the original authors, and redistribution or reuse should follow the corresponding project's license. Please do not reproduce this compilation without permission.