本文整理汇总了Python中rdflib.util.from_n3函数的典型用法代码示例。如果您正苦于以下问题:Python from_n3函数的具体用法?Python from_n3怎么用?Python from_n3使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了from_n3函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: test_util_from_n3_expectliteralwithdatatypefrombool
def test_util_from_n3_expectliteralwithdatatypefrombool(self):
    """The bare n3 booleans 'true'/'false' parse into boolean Literals."""
    for n3_text, expected in (('true', Literal(True)),
                              ('false', Literal(False))):
        self.assertEqual(util.from_n3(n3_text), expected)
开发者ID:JesusPatate,项目名称:rdflib,代码行数:7,代码来源:test_util.py
示例2: get_semantic_associations
def get_semantic_associations(fn=None, limit=None):
    """Return a list of (source, target) URI pairs.

    Without *fn*, the pairs are derived from the verified DBpedia
    mappings; otherwise they are read from the given (optionally
    gzipped) file, which holds one space separated pair of n3 encoded
    IRIs per line. *limit* caps the number of pairs read from file.
    """
    if fn:
        pairs = []
        opener = gzip.open if fn.endswith('.gz') else open
        with opener(fn) as f:
            reader = csv.DictReader(
                f,
                delimiter=b' ',
                doublequote=False,
                escapechar=None,
                quoting=csv.QUOTE_NONE,
            )
            assert reader.fieldnames == ['source', 'target']
            for i, row in enumerate(reader):
                if limit and i >= limit:
                    break
                pairs.append((from_n3(row['source'].decode('UTF-8')),
                              from_n3(row['target'].decode('UTF-8'))))
    else:
        mappings = get_verified_mappings()
        pairs = [URIRefify(p)
                 for p in get_dbpedia_pairs_from_mappings(mappings)]
    return pairs
开发者ID:RDFLib,项目名称:graph-pattern-learner,代码行数:26,代码来源:ground_truth_tools.py
示例3: test_util_from_n3_expectpartialidempotencewithn3
def test_util_from_n3_expectpartialidempotencewithn3(self):
    """Round-tripping n3 -> term -> n3 reproduces the input for these cases."""
    cases = (
        '<http://ex.com/foo>',
        '"foo"@de',
        #'"\\""', # exception as '\\"' --> '"' by orig parser as well
        '"""multi\n"line"\nstring"""@en',
    )
    for term_n3 in cases:
        self.assertEqual(
            util.from_n3(term_n3).n3(), term_n3,
            'from_n3(%(n3e)r).n3() != %(n3e)r' % {'n3e': term_n3})
开发者ID:JesusPatate,项目名称:rdflib,代码行数:7,代码来源:test_util.py
示例4: test_util_from_n3_expectsameasn3parser
def test_util_from_n3_expectsameasn3parser(self):
    """util.from_n3 should agree with the full notation3 parser.

    Each snippet is parsed twice: once via util.from_n3 and once by
    embedding it as the object of a dummy triple and running the real
    n3 parser; the resulting terms must compare equal.
    """
    def parse_n3(term_n3):
        ''' Disclaimer: Quick and dirty hack using the n3 parser. '''
        prepstr = ("@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\n"
                   "<urn:no_use> <urn:no_use> %s.\n" % term_n3)
        g = ConjunctiveGraph()
        g.parse(data=prepstr, format='n3')
        # the object of the single triple parsed above
        return [t for t in g.triples((None, None, None))][0][2]

    for n3 in (# "michel", # won't parse in original parser
               # "_:michel", # BNodes won't be the same
               '"michel"',
               '<http://example.org/schema>',
               '"michel"@fr',
               # '"michel"@fr^^xsd:fr', # FIXME: invalid n3, orig parser will prefer datatype
               # '"true"^^xsd:boolean', # FIXME: orig parser will expand xsd prefix
               '42',
               'true',
               'false',
               '"""multi\nline\nstring"""@en',
               '<http://ex.com/foo>',
               '"foo"@de',
               '"\\""@en',
               '"""multi\n"line"\nstring"""@en'):
        res, exp = util.from_n3(n3), parse_n3(n3)
        # assertEqual instead of the deprecated assertEquals alias
        # (consistent with the other tests in this suite)
        self.assertEqual(res, exp,
                         'from_n3(%(n3e)r): %(res)r != parser.notation3: %(exp)r' % {
                             'res': res, 'exp': exp, 'n3e': n3})
开发者ID:JesusPatate,项目名称:rdflib,代码行数:28,代码来源:test_util.py
示例5: lookup_blanks
def lookup_blanks(self, g, bn, conn):
    """Recursively find any relevant blank nodes for
    the current lookup
    @param g The graph
    @param bn The blank node ID (starting _:)
    @param conn The database connection
    """
    cursor = conn.cursor()
    # BUGFIX: the original query contained no placeholder yet was handed
    # the (bn[2:],) parameter tuple, which makes sqlite3 raise
    # ProgrammingError ("Incorrect number of bindings supplied").
    # Restrict the rows to the requested blank node via subject=?.
    # NOTE(review): assumes the subject column stores the blank id
    # without the leading "_:" (hence bn[2:]) -- confirm against schema.
    cursor.execute("""select subject, property, object from triples where
                      page="<BLANK>" and subject=?""", (bn[2:],))
    rows = cursor.fetchall()
    if rows:
        for s, p, o in rows:
            g.add((from_n3(s), from_n3(p), from_n3(o)))
            # objects that are themselves blank nodes are expanded recursively
            if o.startswith("_:"):
                self.lookup_blanks(g, o, conn)
    cursor.close()
开发者ID:jmccrae,项目名称:yuzu,代码行数:17,代码来源:backend.py
示例6: ask_NODE
def ask_NODE(self, g, sections, var, prompt):
    """Ask the user for a node value.

    An answer of the form 'c<section>' constructs that section (when the
    variable has a matching class hint); any other non-empty answer is
    parsed as n3; an empty answer yields None.
    """
    answer = self.input(prompt)
    if not answer:
        return None
    if answer.startswith("c") and var.classhint and var.classhint in sections:
        section = sections[answer[1:].strip()]
        node = section.construct(g, sections, None)
        print("back to {}".format(self.name), file=self.out)
        return node
    return util.from_n3(answer)
开发者ID:cnh,项目名称:pkb4unix,代码行数:11,代码来源:construction.py
示例7: summarize
def summarize(self, id):
    """Summarize an id
    @param id The id
    @return A RDFlib Graph (possibly empty) with at most 20 facet triples
    """
    graph = ConjunctiveGraph()
    conn = sqlite3.connect(self.db)
    cursor = conn.cursor()
    cursor.execute(
        """select subject, property, object from triples where
           subject=?""", ("<%s%s>" % (BASE_NAME, unicode_escape(id)),))
    added = 0
    for s, p, o in cursor.fetchall():
        for facet in FACETS:
            # str(p)[1:-1] strips the surrounding <...> from the n3 property
            if added < 20 and str(p)[1:-1] == facet["uri"]:
                graph.add((from_n3(s), from_n3(p), from_n3(o)))
                added += 1
    conn.close()
    return graph
开发者ID:jmccrae,项目名称:yuzu,代码行数:22,代码来源:backend.py
示例8: lookup
def lookup(self, id):
    """Resolve a single id
    @param id The id
    @return A RDFlib Graph or None if the ID is not found
    """
    g = ConjunctiveGraph()
    g.bind("lemon", "http://lemon-model.net/lemon#")
    g.bind("owl", str(OWL))
    conn = sqlite3.connect(self.db)
    # BUGFIX: the original only closed the connection on the found path,
    # leaking it whenever the id did not exist; try/finally closes always.
    try:
        cursor = conn.cursor()
        cursor.execute(
            """select subject, property, object from triples where
               page=?""", (unicode_escape(id),))
        rows = cursor.fetchall()
        if not rows:
            return None
        for s, p, o in rows:
            g.add((from_n3(s), from_n3(p), from_n3(o)))
            # expand blank-node objects recursively
            if o.startswith("_:"):
                self.lookup_blanks(g, o, conn)
        return g
    finally:
        conn.close()
开发者ID:jmccrae,项目名称:yuzu,代码行数:24,代码来源:backend.py
示例9: list_values
def list_values(self, offset, limit, prop):
    """
    Produce a list of all possible values for a particular property
    @param offset Where to start listing
    @param limit Number of values to list
    @param prop The property to list for
    @return A tuple consisting of a boolean indicating if there are more
    results and list of values that exist (as N3)
    """
    conn = sqlite3.connect(self.db)
    cursor = conn.cursor()
    if not offset:
        offset = 0
    # fetch limit+1 rows so we can tell whether further results exist
    cursor.execute("""SELECT DISTINCT object, obj_label, count(*)
                      FROM triples WHERE property=? AND head=0
                      GROUP BY oid ORDER BY count(*) DESC
                      LIMIT ? OFFSET ?""", (prop, limit + 1, offset))
    row = cursor.fetchone()
    n = 0
    results = []
    while n < limit and row:
        obj, label, count = row
        term = from_n3(obj)
        # isinstance instead of type() == ... so rdflib subclasses are
        # handled correctly; terms that are neither Literal nor URIRef
        # (e.g. blank nodes) are counted but not listed, as before
        if isinstance(term, Literal):
            results.append({'link': obj, 'label': term.value,
                            'count': count})
        elif isinstance(term, URIRef):
            if label:
                results.append({'link': obj, 'label': label,
                                'count': count})
            else:
                results.append({'link': obj,
                                'label': yuzu.displayer.DISPLAYER.apply(
                                    str(term)),
                                'count': count})
        n += 1
        row = cursor.fetchone()
    conn.close()
    return n == limit, results
开发者ID:jmccrae,项目名称:yuzu,代码行数:45,代码来源:backend.py
示例10: srtsx_body2
def srtsx_body2(r, vars):
    """Yield SPARQL XML result <binding> elements for one result row.

    @param r A result row: n3-encoded values in the same order as vars
    @param vars The variable names of the query
    """
    # enumerate instead of vars.index(v): avoids an O(n^2) scan per row
    # and stays correct even if a variable name appears twice
    for i, v in enumerate(vars):
        val = from_n3(r[i])
        if isinstance(val, URIRef):
            yield (" <binding name=\"%s\"><uri>%s</uri></binding>"
                   % (v, str(val)))
        elif isinstance(val, BNode):
            yield (" <binding name=\"%s\"><bnode>%s</bnode></binding>"
                   % (v, str(val)))
        elif val.language:
            yield (" <binding name=\"%s\"><literal xml:lang=\"%s\">"
                   "%s</literal></binding>" % (v, val.language, str(val)))
        elif val.datatype:
            yield (" <binding name=\"%s\"><literal datatype=\"%s\">"
                   "%s</literal></binding>" % (v, val.datatype, str(val)))
        else:
            yield (" <binding name=\"%s\"><literal>%s</literal></binding>"
                   % (v, str(val)))
开发者ID:jmccrae,项目名称:yuzu,代码行数:18,代码来源:model.py
示例11: srtsj_body2
def srtsj_body2(r, vars):
    """Yield SPARQL JSON result binding fragments for one result row.

    @param r A result row: n3-encoded values in the same order as vars
    @param vars The variable names of the query
    """
    # enumerate instead of vars.index(v): avoids an O(n^2) scan per row
    # and stays correct even if a variable name appears twice
    for i, v in enumerate(vars):
        val = from_n3(r[i])
        if not val:
            yield ""
            # BUGFIX: when from_n3 yields no term (val is None) the
            # original fell through to the isinstance/val.language
            # checks and raised AttributeError; skip to the next var.
            # Falsy Literals (e.g. "") still go on to emit a binding,
            # matching the original behavior.
            if val is None:
                continue
        if isinstance(val, URIRef):
            yield (" \"%s\": { \"type\": \"uri\", \"value\": \"%s\" }"
                   % (v, str(val)))
        elif isinstance(val, BNode):
            yield (" \"%s\": { \"type\": \"bnode\", \"value\": \"%s\" }"
                   % (v, str(val)))
        elif val.language:
            yield (" \"%s\": { \"type\": \"literal\", \"xml:lang\": "
                   "\"%s\", \"value\": \"%s\" }" % (v, val.language, str(val)))
        elif val.datatype:
            yield (" \"%s\": { \"type\": \"literal\", \"datatype\": "
                   "\"%s\", \"value\": \"%s\" }" % (v, val.datatype,
                                                    str(val)))
        else:
            yield (" \"%s\": { \"type\": \"literal\", \"value\": \"%s\" }"
                   % (v, str(val)))
开发者ID:jmccrae,项目名称:yuzu,代码行数:21,代码来源:model.py
示例12: entry
#.........这里部分代码省略.........
#cursor.execute("select * from senses where wordid=? and casedwordid is NULL", (word_id,))
cursor.execute("select * from senses where wordid=?", (word_id,))
else:
cursor.execute("select casedwordid from casedwords where cased=?",(cased_lemma,))
row = cursor.fetchone()
if row is None:
return None
casedwordid, = row
cursor.execute("select * from senses where casedwordid=?", (casedwordid,))
for _, casedwordid, synsetid, senseid, sensenum, lexid, tagcount, old_sensekey, sensekey in cursor.fetchall():
# NB. This could also be achieved by querying "casedwordid is NULL" however
# this is significantly slower, so we filter in Python checking we return cased
# forms only for cased lemmas
if cased_lemma.islower() == bool(casedwordid):
continue
if sensekey[-1] == pos:
this_pos_found = True
_, sensekey2 = sensekey.split('#')
sense_uri = entry_name(cased_lemma, pos, sensekey2)
graph.add((entry_uri, lemon.sense, sense_uri))
graph.add((sense_uri, RDF.type, lemon.LexicalSense))
graph.add((sense_uri, lemon.reference, synset_name(context, synsetid, pos)))
graph.add((sense_uri, wn_ontology.sense_number, Literal(sensenum)))
graph.add((sense_uri, wn_ontology.tag_count, Literal(tagcount)))
graph.add((sense_uri, wn_ontology.lex_id, Literal(lexid)))
graph.add((sense_uri, wn_ontology.old_sense_key, Literal(old_sensekey)))
# Now adjective positions
cursor.execute("select position from adjpositions where synsetid=? and wordid=?", (synsetid, word_id))
rows = cursor.fetchall()
for position, in rows:
graph.add((sense_uri, wn_ontology.adjposition,
URIRef(wn_ontology.term(quote_plus(context.adjposition_names[position])))))
# Add definition also to sense
cursor.execute("select definition from synsets where synsetid=?", (synsetid,))
for definition, in cursor.fetchall():
graph.add((sense_uri, wn_ontology.gloss, Literal(definition, lang=context.lang)))
# Sense links
cursor.execute("select senseid2, linkid from lexlinks where senseid1=?", (senseid,))
for senseid2, linkid in cursor.fetchall():
cursor.execute("select sensekey from senses where senseid=?", (senseid2,))
sensekey3, = cursor.fetchone()
sense2_lemma, sense2_key = sensekey3.split('#')
pos2 = sensekey3[-1]
sense_uri2 = entry_name(sense2_lemma, pos2, sense2_key)
graph.add((sense_uri, wn_ontology.term(context.linktypes[linkid]), sense_uri2))
# Verb frames (maybe only if pos=='v'?)
cursor.execute("select sentenceid from vframesentencemaps where synsetid=? and wordid=?",
(synsetid, word_id))
for sentenceid, in cursor.fetchall():
graph.add((sense_uri, wn_ontology.verb_frame_sentence,
Literal(context.vframesentences[sentenceid], lang=context.lang)))
# Sense tags
cursor.execute("select position, senseid from sensetags inner join taggedtexts on sensetags.sensetagid=taggedtexts.sensetagid where new_sensekey=?",(sensekey,)) # unindexed
for position, senseid in cursor.fetchall():
cursor.execute("select sensekey from senses where senseid=?",(senseid,))
for sensekey, in cursor.fetchall():
if position:
comp_uri = entry_name(sensekey[0:sensekey.index('#')].replace("_"," "),sensekey[-1],'Component-' + str(position+1))
graph.add((sense_uri, wn_ontology.sense_tag, comp_uri))
# LexVo Link
graph.add((sense_uri, OWL.sameAs, translate_to_lexvo(old_sensekey, pos)))
if not this_pos_found:
return None
if pos == "p":
words = lemma.split(" ")
node = BNode()
comp1 = entry_name(lemma, pos, "Component-1")
graph.add((entry_uri, lemon.decomposition, node))
graph.add((node, RDF.first, comp1))
graph.add((comp1, RDFS.label, Literal(words[0], lang=context.lang)))
graph.add((comp1, RDF.type, lemon.Component))
for idx in range(1,len(words)):
node2 = BNode()
graph.add((node, RDF.rest, node2))
node = node2
comp_uri = entry_name(lemma, pos, "Component-" + str(idx + 1))
graph.add((node, RDF.first, comp_uri))
graph.add((comp_uri, RDFS.label, Literal(words[idx], lang=context.lang)))
graph.add((comp_uri, RDF.type, lemon.Component))
graph.add((node, RDF.rest, RDF.nil))
try:
cursor.execute("select fragment, property, object from entrytriples where lemma=?",(quote_plus(lemma)+"-"+pos,))
for f, p, o in cursor.fetchall():
graph.add((entry_name(lemma,pos,f), from_n3(p), from_n3(o)))
except:
pass
return graph
开发者ID:jmccrae,项目名称:wn-rdf,代码行数:101,代码来源:WNRDF.py
示例13: test_util_from_n3_expectliteralwithescapedquote
def test_util_from_n3_expectliteralwithescapedquote(self):
    """An escaped quote inside a plain n3 literal unescapes to '"'."""
    s = '"\\""'
    res = util.from_n3(s, default=None, backend=None)
    # BUGFIX: self.assert_(a, b) only checks the truthiness of `a`
    # (`b` is the failure message), so the original asserted nothing.
    # The input '"\\""' denotes the one-character string '"' with no
    # language tag, so compare against that.
    self.assertEqual(res, Literal('"'))
示例14: test_util_from_n3_expectliteralmultiline
def test_util_from_n3_expectliteralmultiline(self):
    """A triple-quoted n3 literal with a language tag parses correctly."""
    s = '"""multi\nline\nstring"""@en'
    res = util.from_n3(s, default=None, backend=None)
    # BUGFIX: self.assert_(a, b) only checks the truthiness of `a`
    # (`b` is the failure message); use a real equality assertion.
    self.assertEqual(res, Literal('multi\nline\nstring', lang='en'))
示例15: test_util_from_n3_expectliteralwithtrailingbackslash
def test_util_from_n3_expectliteralwithtrailingbackslash(self):
    """An escaped trailing backslash survives the n3 round trip."""
    s = '"trailing\\\\"^^<http://www.w3.org/2001/XMLSchema#string>'
    res = util.from_n3(s)
    # BUGFIX: self.assert_(a, b) only checks the truthiness of `a`
    # (`b` is the failure message); assert equality and the n3
    # round trip explicitly.
    self.assertEqual(res, Literal('trailing\\', datatype=XSD['string']))
    self.assertEqual(res.n3(), s)
开发者ID:Dataliberate,项目名称:rdflib,代码行数:5,代码来源:test_util.py
示例16: main
def main(
        resdir,
        sparql_endpoint,
        max_queries,
        clustering_variant,
        fusion_methods,
        timeout,
        max_results,
        max_target_candidates_per_gp,
        batch_predict,
        drop_bad_uris,
        **_  # gulp remaining kwargs
):
    """Read n3-encoded source URIs from stdin and print predictions as JSON.

    Loads the last trained graph-pattern model from resdir, clusters its
    patterns to limit the number of SPARQL queries, then processes stdin
    in batches, printing one JSON result object per input URI.
    Exits with status 1 if no trained model is found or an input line
    does not look like an n3 IRI/literal.
    """
    # imports kept local, presumably to defer heavy module loading
    # until main() actually runs -- TODO confirm
    from gp_query import calibrate_query_timeout
    from serialization import load_results
    from serialization import find_last_result
    from cluster import cluster_gps_to_reduce_queries
    from gp_learner import init_workers
    # init workers
    init_workers()

    sparql = SPARQLWrapper.SPARQLWrapper(sparql_endpoint)
    # non-positive timeout means: measure a suitable timeout empirically
    timeout = timeout if timeout > 0 else calibrate_query_timeout(sparql)

    # load model
    last_res = find_last_result()
    if not last_res:
        logger.error('cannot find fully trained model in %s', resdir)
        sys.exit(1)
    # coverage_counts is not used below
    result_patterns, coverage_counts, gtp_scores = load_results(last_res)
    gps = [gp for gp, _ in result_patterns]
    # reduce the number of patterns (and thereby queries) by clustering
    gps = cluster_gps_to_reduce_queries(
        gps, max_queries, gtp_scores, clustering_variant)

    processed = 0
    start = time.time()
    batch_size = config.BATCH_SIZE if batch_predict else 1
    # main loop
    for lines in chunker(sys.stdin, batch_size):
        batch = []
        for line in lines:
            line = line.strip()
            if not line:
                continue
            if drop_bad_uris:
                # best-effort filter: skip URIs that cannot be curified
                # rather than aborting the whole run
                # noinspection PyBroadException
                try:
                    source = from_n3(line)
                    utils.curify(source)
                except Exception:
                    logger.warning(
                        'Warning: Could not curify URI %s! Skip.', line)
                    continue
            # inputs must be n3 terms: IRIs start with '<', literals with '"'
            if line[0] not in '<"':
                logger.error(
                    'expected inputs to start with < or ", but got: %s', line)
                sys.exit(1)
            source = from_n3(line)
            batch.append(source)
        # de-duplicate while preserving input order
        batch = list(OrderedDict.fromkeys(batch))
        if len(batch) == 0:
            pass
        elif len(batch) == 1:
            # single-item path: predict() prints one JSON object
            res = predict(
                sparql, timeout, gps, batch[0], fusion_methods,
                max_results, max_target_candidates_per_gp
            )
            print(json.dumps(res))
            logger.info(
                'Predicted %d target candidates for %s',
                res['orig_result_length'], res['source']
            )
        else:
            # batched path: one JSON object per input
            res = multi_predict(
                sparql, timeout, gps, batch, fusion_methods,
                max_results, max_target_candidates_per_gp
            )
            for r in res:
                print(json.dumps(r))
            logger.info('\n'.join([
                'Predicted %d target candidates for %s' % (
                    r['orig_result_length'], r['source']
                ) for r in res
            ]))
        processed += len(batch)
        logger.info(
            'Have processed %d URIs now. Took %s sec',
            processed, time.time()-start)
示例17: test_util_from_n3_expectquotedgraph
def test_util_from_n3_expectquotedgraph(self):
    """A {...} formula parses into a QuotedGraph when a store backend is given."""
    res = util.from_n3('{<http://example.com/schema>}', default=None,
                       backend="IOMemory")
    self.assertTrue(isinstance(res, QuotedGraph))
开发者ID:drewp,项目名称:rdflib,代码行数:4,代码来源:test_util.py
示例18: test_util_from_n3_expectliteralandlang
def test_util_from_n3_expectliteralandlang(self):
    """A quoted string with an @lang tag parses to a Literal."""
    s = '"michel"@fr'
    res = util.from_n3(s, default=None, backend=None)
    # assertIsInstance instead of the deprecated assert_ alias
    self.assertIsInstance(res, Literal)
示例19: ask_LITERAL
def ask_LITERAL(self, g, sections, var, prompt):
    """Ask the user for a literal value.

    Input starting with a quote character is parsed as n3; anything else
    is wrapped in a Literal using the variable's language/datatype hints.
    """
    answer = self.input(prompt)
    if answer[:1] in ('"', "'"):
        return util.from_n3(answer)
    return Literal(answer, lang=var.langhint, datatype=var.datatypehint)
开发者ID:cnh,项目名称:pkb4unix,代码行数:6,代码来源:construction.py
示例20: test_util_from_n3_expectbnode
def test_util_from_n3_expectbnode(self):
    """A term starting with '_:' parses to a BNode."""
    s = "_:michel"
    res = util.from_n3(s, default=None, backend=None)
    # assertIsInstance instead of the deprecated assert_ alias
    self.assertIsInstance(res, BNode)
开发者ID:JesusPatate,项目名称:rdflib,代码行数:4,代码来源:test_util.py
注:本文中的rdflib.util.from_n3函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论