本文整理汇总了Python中regparser.tree.struct.Node类的典型用法代码示例。如果您正苦于以下问题:Python Node类的具体用法?Python Node怎么用?Python Node使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Node类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: paragraph_with_marker
def paragraph_with_marker(self, text, tagged_text):
    """The paragraph has a marker, like (a) or a. etc.

    One XML node may hold several collapsed paragraphs; split the text
    and append a Node for each resulting paragraph.

    :param text: plain text of the paragraph, beginning with a marker
    :param tagged_text: the same text with emphasis tags preserved
    """
    # To aid in determining collapsed paragraphs, replace any keyterms
    # present -- masking the keyterm keeps punctuation inside it from
    # being mistaken for a paragraph boundary
    node_for_keyterms = Node(text, node_type=Node.APPENDIX)
    node_for_keyterms.tagged_text = tagged_text
    node_for_keyterms.label = [initial_marker(text)[0]]
    keyterm = KeyTerms.get_keyterm(node_for_keyterms)
    if keyterm:
        mtext = text.replace(keyterm, ';' * len(keyterm))
    else:
        mtext = text
    for mtext in split_paragraph_text(mtext):
        if keyterm:     # still need the original text
            mtext = mtext.replace(';' * len(keyterm), keyterm)
        node = Node(mtext, node_type=Node.APPENDIX,
                    label=[initial_marker(mtext)[0]])
        node.tagged_text = tagged_text
        self.nodes.append(node)
开发者ID:phildini,项目名称:regulations-parser,代码行数:31,代码来源:appendices.py
示例2: test_create_xml_changes_child_stars
def test_create_xml_changes_child_stars(self):
    """Amend a node whose source XML is followed by STARS: the first
    pass emits a plain PUT; once the node's text diverges from its XML,
    the change is restricted to the [text] field."""
    labels_amended = [Amendment('PUT', '200-?-2-a')]
    with XMLBuilder("ROOT") as ctx:
        ctx.P("(a) Content")
        ctx.STARS()
    n2a = Node('(a) Content', label=['200', '2', 'a'],
               source_xml=ctx.xml.xpath('//P')[0])
    n2b = Node('(b) Content', label=['200', '2', 'b'])
    n2 = Node('n2', label=['200', '2'], children=[n2a, n2b])
    root = Node('root', label=['200'], children=[n2])
    notice_changes = changes.NoticeChanges()
    amendments.create_xml_changes(labels_amended, root, notice_changes)
    data = notice_changes.changes_by_xml[None]
    self.assertIn('200-2-a', data)
    # was assertTrue(1, len(...)) -- the second argument is a failure
    # *message*, so that assertion could never fail; assert the count
    self.assertEqual(1, len(data['200-2-a']))
    change = data['200-2-a'][0]
    self.assertEqual('PUT', change['action'])
    self.assertNotIn('field', change)
    n2a.text = n2a.text + ":"
    n2a.source_xml.text = n2a.source_xml.text + ":"
    notice_changes = changes.NoticeChanges()
    amendments.create_xml_changes(labels_amended, root, notice_changes)
    data = notice_changes.changes_by_xml[None]
    self.assertIn('200-2-a', data)
    self.assertEqual(1, len(data['200-2-a']))
    change = data['200-2-a'][0]
    self.assertEqual('PUT', change['action'])
    self.assertEqual('[text]', change.get('field'))
开发者ID:anthonygarvan,项目名称:regulations-parser,代码行数:33,代码来源:notice_amendments_tests.py
示例3: test_create_xml_changes_child_stars
def test_create_xml_changes_child_stars():
    """Amending a node backed by XML that is followed by STARS: the first
    amendment is a full PUT; after the node text drifts from its source
    XML, only the [text] field is changed."""
    labels_amended = [Amendment('PUT', '200-?-2-a')]
    with XMLBuilder("ROOT") as ctx:
        ctx.P("(a) Content")
        ctx.STARS()
    child_a = Node('(a) Content', label=['200', '2', 'a'],
                   source_xml=ctx.xml.xpath('//P')[0])
    child_b = Node('(b) Content', label=['200', '2', 'b'])
    section = Node('n2', label=['200', '2'], children=[child_a, child_b])
    tree = Node('root', label=['200'], children=[section])

    def single_change():
        # Run the amendment and return the lone change for 200-2-a
        collector = changes.NoticeChanges()
        fetch.create_xml_changes(labels_amended, tree, collector)
        per_label = collector[None]
        assert '200-2-a' in per_label
        assert len(per_label['200-2-a']) == 1
        return per_label['200-2-a'][0]

    change = single_change()
    assert change['action'] == 'PUT'
    assert 'field' not in change

    child_a.text = child_a.text + ":"
    child_a.source_xml.text = child_a.source_xml.text + ":"
    change = single_change()
    assert change['action'] == 'PUT'
    assert change.get('field') == '[text]'
开发者ID:eregs,项目名称:regulations-parser,代码行数:33,代码来源:fetch_tests.py
示例4: test_create_xml_changes_child_stars
def test_create_xml_changes_child_stars(self):
    """Amend a node whose source XML is followed by STARS: first pass is
    a plain PUT; after the node text diverges from its XML, the change
    carries field == [text]."""
    labels_amended = [Amendment("PUT", "200-2-a")]
    xml = etree.fromstring("<ROOT><P>(a) Content</P><STARS /></ROOT>")
    n2a = Node("(a) Content", label=["200", "2", "a"], source_xml=xml.xpath("//P")[0])
    n2b = Node("(b) Content", label=["200", "2", "b"])
    n2 = Node("n2", label=["200", "2"], children=[n2a, n2b])
    root = Node("root", label=["200"], children=[n2])
    notice_changes = changes.NoticeChanges()
    build.create_xml_changes(labels_amended, root, notice_changes)
    self.assertTrue("200-2-a" in notice_changes.changes)
    # was assertTrue(1, len(...)) -- the second argument is a failure
    # *message*, so that assertion could never fail; assert the count
    self.assertEqual(1, len(notice_changes.changes["200-2-a"]))
    change = notice_changes.changes["200-2-a"][0]
    self.assertEqual("PUT", change["action"])
    self.assertFalse("field" in change)
    n2a.text = n2a.text + ":"
    n2a.source_xml.text = n2a.source_xml.text + ":"
    notice_changes = changes.NoticeChanges()
    build.create_xml_changes(labels_amended, root, notice_changes)
    self.assertTrue("200-2-a" in notice_changes.changes)
    self.assertEqual(1, len(notice_changes.changes["200-2-a"]))
    change = notice_changes.changes["200-2-a"][0]
    self.assertEqual("PUT", change["action"])
    self.assertEqual("[text]", change.get("field"))
开发者ID:EricSchles,项目名称:regulations-parser,代码行数:28,代码来源:notice_build_tests.py
示例5: section_node
def section_node(self):
    """Build a minimal tree: root 200 -> section 200-2 -> child 200-2-a."""
    section = Node('n2', label=['200', '2'])
    paragraph = Node('n2a', label=['200', '2', 'a'])
    section.children = [paragraph]
    return Node('root', label=['200'], children=[section])
开发者ID:anthonygarvan,项目名称:regulations-parser,代码行数:7,代码来源:notice_changes_tests.py
示例6: nodes_from_interp_p
def nodes_from_interp_p(xml_node):
    """Given an XML node that contains text for an interpretation paragraph,
    split it into sub-paragraphs and account for trailing stars"""
    plain = tree_utils.get_node_text(xml_node, add_spaces=True)
    tagged = tree_utils.get_node_text_tags_preserved(xml_node)
    lead_marker = get_first_interp_marker(tagged)
    matches = collapsed_markers_matches(plain, tagged)
    # -2 throughout to account for matching the character + period
    boundaries = [m.end() - 2 for m in matches] + [len(plain)]
    # Node for this paragraph (text up to the first collapsed marker)
    head = Node(plain[:boundaries[0]], label=[lead_marker],
                node_type=Node.INTERP)
    head.tagged_text = tagged
    yield head
    if head.text.endswith('* * *'):
        yield Node(label=[mtypes.INLINE_STARS])
    # Collapsed-marker children: each match's text runs to the next boundary
    for match, stop in zip(matches, boundaries[1:]):
        marker = match.group(1)
        if marker == '1':
            marker = '<E T="03">1</E>'
        child = Node(plain[match.end() - 2:stop], label=[marker],
                     node_type=Node.INTERP)
        yield child
        if child.text.endswith('* * *'):
            yield Node(label=[mtypes.INLINE_STARS])
开发者ID:tadhg-ohiggins,项目名称:regulations-parser,代码行数:30,代码来源:interpretations.py
示例7: test_dict_to_node
def test_dict_to_node(self):
    """dict_to_node builds a Node from a dict with text/label/node_type,
    carries tagged_text over when present, and returns dicts that are not
    node-shaped unchanged."""
    dict_node = {
        'text': 'node text',
        'label': ['205', 'A'],
        'node_type': 'appendix'}
    node = compiler.dict_to_node(dict_node)
    self.assertEqual(
        node,
        Node('node text', [], ['205', 'A'], None, 'appendix'))
    dict_node['tagged_text'] = '<E> Tagged </E> text.'
    # (a redundant dict_to_node call whose result was never used has
    # been removed here)
    actual_node = Node('node text', [], ['205', 'A'], None, 'appendix')
    actual_node.tagged_text = '<E> Tagged </E> text.'
    created_node = compiler.dict_to_node(dict_node)
    self.assertEqual(actual_node, created_node)
    self.assertEqual(actual_node.tagged_text, created_node.tagged_text)
    dict_node = {
        'text': 'node text'
    }
    node = compiler.dict_to_node(dict_node)
    self.assertEqual(node, dict_node)
开发者ID:theresaanna,项目名称:regulations-parser,代码行数:30,代码来源:notice_compiler_tests.py
示例8: collapsed_markers_matches
def collapsed_markers_matches(node_text, tagged_text):
    """Find collapsed markers, i.e. tree node paragraphs that begin within a
    single XML node, within this text. Remove citations and other false
    positives. This is pretty hacky right now -- it focuses on the plain
    text but takes cues from the tagged text. @todo: streamline logic"""
    # In addition to the regex above, keyterms are an acceptable prefix. We
    # therefore convert keyterms to satisfy the above regex
    probe = Node(node_text, node_type=Node.INTERP,
                 label=[get_first_interp_marker(node_text)])
    probe.tagged_text = tagged_text
    keyterm = KeyTerms.get_keyterm(probe)
    if keyterm:
        node_text = node_text.replace(keyterm, '.'*len(keyterm))
    found = []
    for pattern in _first_markers:
        candidates = [(m, m.start(), m.end())
                      for m in pattern.finditer(node_text)
                      if m.start() > 0]
        candidates = remove_citation_overlaps(node_text, candidates)
        # If certain characters follow, kill it
        for trailing in ("e.", ")", u"”", '"', "'"):
            candidates = [(m, s, e) for m, s, e in candidates
                          if not node_text[e:].startswith(trailing)]
        matches = [m for m, _, _ in candidates]
        # As all "1." collapsed markers must be emphasized, run a quick
        # check to weed out some false positives
        if '<E T="03">1' not in tagged_text:
            matches = [m for m in matches if m.group(1) != '1']
        found.extend(matches)
    return found
开发者ID:cmc333333,项目名称:regulations-parser,代码行数:30,代码来源:interpretations.py
示例9: test_create_xml_changes_child_stars
def test_create_xml_changes_child_stars(self):
    """Amend a node whose source XML is followed by STARS: first pass is
    a plain PUT; after the node text diverges from its XML, the change
    carries field == [text]."""
    labels_amended = [Amendment('PUT', '200-2-a')]
    xml = etree.fromstring("<ROOT><P>(a) Content</P><STARS /></ROOT>")
    n2a = Node('(a) Content', label=['200', '2', 'a'],
               source_xml=xml.xpath('//P')[0])
    n2b = Node('(b) Content', label=['200', '2', 'b'])
    n2 = Node('n2', label=['200', '2'], children=[n2a, n2b])
    root = Node('root', label=['200'], children=[n2])
    notice_changes = changes.NoticeChanges()
    build.create_xml_changes(labels_amended, root, notice_changes)
    self.assertTrue('200-2-a' in notice_changes.changes)
    # was assertTrue(1, len(...)) -- the second argument is a failure
    # *message*, so that assertion could never fail; assert the count
    self.assertEqual(1, len(notice_changes.changes['200-2-a']))
    change = notice_changes.changes['200-2-a'][0]
    self.assertEqual('PUT', change['action'])
    self.assertFalse('field' in change)
    n2a.text = n2a.text + ":"
    n2a.source_xml.text = n2a.source_xml.text + ":"
    notice_changes = changes.NoticeChanges()
    build.create_xml_changes(labels_amended, root, notice_changes)
    self.assertTrue('200-2-a' in notice_changes.changes)
    self.assertEqual(1, len(notice_changes.changes['200-2-a']))
    change = notice_changes.changes['200-2-a'][0]
    self.assertEqual('PUT', change['action'])
    self.assertEqual('[text]', change.get('field'))
开发者ID:theresaanna,项目名称:regulations-parser,代码行数:29,代码来源:notice_build_tests.py
示例10: test_keyterm_is_first_not_first
def test_keyterm_is_first_not_first(self):
    """An emphasized phrase that does not open the paragraph must not be
    treated as a leading key term."""
    node = Node('(a) This has a list: apples et seq.',
                label=['101', '22', 'a'])
    node.tagged_text = '(a) This has a list: apples <E T="03">et seq.</E>'
    layer = KeyTerms(None)
    self.assertFalse(layer.keyterm_is_first(node, 'et seq.'))
开发者ID:cfpb,项目名称:regulations-parser,代码行数:7,代码来源:layer_keyterms_tests.py
示例11: test_no_keyterm
def test_no_keyterm(self):
    """A paragraph with no emphasized phrase yields no key-term layer
    data (process returns None)."""
    node = Node('(a) Apples are grown in New Zealand.',
                label=['101', '22', 'a'])
    node.tagged_text = '(a) Apples are grown in New Zealand.'
    kt = KeyTerms(None)
    results = kt.process(node)
    # assertEquals is a deprecated alias removed in Python 3.12
    self.assertEqual(results, None)
开发者ID:cfpb,项目名称:regulations-parser,代码行数:7,代码来源:layer_keyterms_tests.py
示例12: collapsed_markers_matches
def collapsed_markers_matches(node_text, tagged_text):
    """Find collapsed markers, i.e. tree node paragraphs that begin within a
    single XML node, within this text. Remove citations and other false
    positives. This is pretty hacky right now -- it focuses on the plain
    text but takes cues from the tagged text. @todo: streamline logic"""
    # Keyterms are an acceptable prefix in addition to the marker regexes,
    # so mask any keyterm with filler characters of equal length
    probe = Node(node_text, node_type=Node.INTERP,
                 label=[get_first_interp_marker(node_text)])
    probe.tagged_text = tagged_text
    keyterm = KeyTerms.keyterm_in_node(probe)
    if keyterm:
        node_text = node_text.replace(keyterm, '.' * len(keyterm))
    found = []
    for regex in _first_markers:
        spans = [(m, m.start(), m.end()) for m in regex.finditer(node_text)]
        spans = remove_citation_overlaps(node_text, spans)
        for match, _, _ in spans:
            if not false_collapsed_marker(match, node_text, tagged_text):
                found.append(match)
    return found
开发者ID:tadhg-ohiggins,项目名称:regulations-parser,代码行数:25,代码来源:interpretations.py
示例13: build_tree
def build_tree(reg_xml):
    """Parse a regulation XML string into a Node tree: subparts when the
    PART has SUBPART children, otherwise an empty part holding the
    sections; non-regulation text is appended last."""
    doc = etree.fromstring(reg_xml)
    preprocess_xml(doc)
    reg_part = get_reg_part(doc)
    tree = Node("", [], [reg_part], get_title(doc))
    part = doc.xpath('//PART')[0]
    subpart_xmls = [child for child in part.getchildren()
                    if child.tag == 'SUBPART']
    if subpart_xmls:
        tree.children = [build_subpart(reg_part, sub)
                         for sub in subpart_xmls]
    else:
        sections = []
        for child in part.getchildren():
            if child.tag == 'SECTION':
                sections.extend(build_from_section(reg_part, child))
        empty_part = reg_text.build_empty_part(reg_part)
        empty_part.children = sections
        tree.children = [empty_part]
    tree.children += build_non_reg_text(doc, reg_part)
    return tree
开发者ID:khandelwal,项目名称:regulations-parser,代码行数:28,代码来源:reg_text.py
示例14: tree_with_subparts
def tree_with_subparts(self):
    """Root '205' holding two subparts (A, B) and one appendix (C)."""
    root = Node("", label=["205"])
    root.children = [
        Node("nsa", label=["205", "Subpart", "A"], node_type=Node.SUBPART),
        Node("nsb", label=["205", "Subpart", "B"], node_type=Node.SUBPART),
        Node("nappa", label=["205", "Appendix", "C"],
             node_type=Node.APPENDIX),
    ]
    return root
开发者ID:phildini,项目名称:regulations-parser,代码行数:10,代码来源:notice_compiler_tests.py
示例15: test_keyterm_and_emphasis
def test_keyterm_and_emphasis(self):
    """Only the leading emphasized phrase becomes the key term, even when
    emphasis also appears later in the paragraph."""
    node = Node('(a) Apples. Apples are grown in New Zealand.',
                label=['101', '22', 'a'])
    node.tagged_text = ('(a) <E T="03">Apples.</E> Apples are grown in '
                        'New <E T="03">Zealand.</E>')
    results = KeyTerms(None).process(node)
    self.assertNotEqual(results, None)
    first = results[0]
    self.assertEqual(first['key_term'], 'Apples.')
    self.assertEqual(first['locations'], [0])
开发者ID:cfpb,项目名称:regulations-parser,代码行数:10,代码来源:layer_keyterms_tests.py
示例16: test_interpretation_markers
def test_interpretation_markers(self):
    """An emphasized phrase right after an interpretation paragraph's
    numeric marker is picked up as the key term."""
    node = Node('3. et seq. has a list: apples',
                label=['101', 'c', Node.INTERP_MARK, '3'],
                node_type=Node.INTERP)
    node.tagged_text = '3. <E T="03">et seq.</E> has a list: apples'
    results = KeyTerms(None).process(node)
    self.assertNotEqual(results, None)
    first = results[0]
    self.assertEqual(first['key_term'], 'et seq.')
    self.assertEqual(first['locations'], [0])
开发者ID:cfpb,项目名称:regulations-parser,代码行数:10,代码来源:layer_keyterms_tests.py
示例17: test_emphasis_close_to_front
def test_emphasis_close_to_front(self):
    """ An emphasized word is close to the front, but is not a key term.
    """
    node = Node('(a) T et seq. has a list: apples',
                label=['101', '22', 'a'])
    node.tagged_text = '(a) T <E T="03">et seq.</E> has a list: apples'
    self.assertFalse(KeyTerms(None).keyterm_is_first(node, 'et seq.'))
开发者ID:cfpb,项目名称:regulations-parser,代码行数:10,代码来源:layer_keyterms_tests.py
示例18: paragraph_with_marker
def paragraph_with_marker(self, text, next_text=''):
    """The paragraph has an (a) or a. etc.

    Determine which marker sequence (entry in p_levels) this paragraph's
    marker belongs to -- disambiguating by peeking at the next
    paragraph's marker and at markers already on the stack -- then push
    the new node onto self.m_stack at the appropriate depth.

    :param text: plain text of this paragraph, beginning with a marker
    :param next_text: plain text of the following paragraph (if any);
        used only to help disambiguate the level
    """
    marker, _ = initial_marker(text)
    n = Node(text, node_type=Node.APPENDIX, label=[marker])
    if initial_marker(next_text):
        next_marker, _ = initial_marker(next_text)
    else:
        next_marker = None
    # Indices of every marker sequence that could contain these markers
    this_p_levels = set(idx for idx, lvl in enumerate(p_levels)
                        if marker in lvl)
    next_p_levels = set(idx for idx, lvl in enumerate(p_levels)
                        if next_marker in lvl)
    # Levels already on the stack, and the p_level values they resolved to
    previous_levels = [l for l in self.m_stack.m_stack if l]
    previous_p_levels = set()
    for stack_level in previous_levels:
        previous_p_levels.update(sn.p_level for _, sn in stack_level
                                 if hasattr(sn, 'p_level'))
    # Ambiguity, e.g. 'i', 'v'. Disambiguate by looking forward
    if len(this_p_levels) > 1 and len(next_p_levels) == 1:
        next_p_level = next_p_levels.pop()
        # e.g. an 'i' followed by a 'ii'
        if next_p_level in this_p_levels:
            this_p_idx = p_levels[next_p_level].index(marker)
            next_p_idx = p_levels[next_p_level].index(next_marker)
            if this_p_idx < next_p_idx:     # Heuristic
                n.p_level = next_p_level
        # e.g. (a)(1)(i) followed by an 'A'
        new_level = this_p_levels - previous_p_levels
        if next_p_level not in previous_p_levels and new_level:
            n.p_level = new_level.pop()
    # Ambiguity. Disambiguate by looking backwards
    if len(this_p_levels) > 1 and not hasattr(n, 'p_level'):
        for stack_level in previous_levels:
            for lvl, stack_node in stack_level:
                if getattr(stack_node, 'p_level', None) in this_p_levels:
                    # Later levels replace earlier ones
                    n.p_level = stack_node.p_level
    # Simple case (no ambiguity) and cases not seen above
    if not getattr(n, 'p_level', None):
        n.p_level = min(this_p_levels)  # rule of thumb: favor lower case
    # Check if we've seen this type of marker before; if so, this node
    # is a sibling of that level rather than a deeper child
    found_in_prev = False
    for stack_level in previous_levels:
        if stack_level and in_same_p_level(n, stack_level):
            found_in_prev = True
            self.depth = stack_level[-1][0]
    if not found_in_prev:   # New type of marker
        self.depth += 1
    self.m_stack.add(self.depth, n)
开发者ID:khandelwal,项目名称:regulations-parser,代码行数:55,代码来源:appendices.py
示例19: test_node_definitions_multiple_xml
def test_node_definitions_multiple_xml(self):
    """Find xml definitions which are separated by `and`"""
    stack = ParentStack().add(0, Node(label=['9999']))
    winter = Node("(4) Cold and dreary mean winter.", label=['9999', '4'])
    winter.tagged_text = ('(4) <E T="03">Cold</E> and '
                          '<E T="03">dreary</E> mean winter.')
    included, _ = Terms(None).node_definitions(winter, stack)
    self.assertEqual(2, len(included))
    self.assertEqual(included[0], Ref('cold', '9999-4', 4))
    self.assertEqual(included[1], Ref('dreary', '9999-4', 13))
开发者ID:anthonygarvan,项目名称:regulations-parser,代码行数:11,代码来源:layer_terms_tests.py
示例20: test_keyterm_see
def test_keyterm_see(self):
    """ Keyterm tags sometimes enclose phrases such as 'See also' because
    those tags are also used for emphasis. """
    node = Node('(a) Apples. See Section 101.2',
                label=['101', '22', 'a'])
    node.tagged_text = '(a) <E T="03">Apples. See also</E>'
    results = KeyTerms(None).process(node)
    self.assertEqual('Apples.', results[0]['key_term'])
开发者ID:cfpb,项目名称:regulations-parser,代码行数:11,代码来源:layer_keyterms_tests.py
注:本文中的regparser.tree.struct.Node类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论