This article compiles typical usage examples of the Java class org.apache.lucene.util.automaton.Operations. If you are unsure what the Operations class does, how to use it, or what real-world usage looks like, the curated class examples below may help.
The Operations class belongs to the org.apache.lucene.util.automaton package. 20 code examples of the Operations class are shown below, sorted by popularity by default.
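Before the collected examples, here is a minimal, self-contained sketch of the typical Operations workflow: build simple automata, combine them with Operations.union, determinize the result, and execute it with a CharacterRunAutomaton. The class name OperationsDemo and the literal strings are illustrative assumptions, not taken from any of the projects below.
import java.util.Arrays;
import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
import org.apache.lucene.util.automaton.Operations;

public class OperationsDemo {
    public static void main(String[] args) {
        // Union of two literal-string automata; the result may be nondeterministic.
        Automaton a = Operations.union(Arrays.asList(
                Automata.makeString("quick"),
                Automata.makeString("brown")));
        // Determinize with the library's default state limit so it can be executed.
        a = Operations.determinize(a, Operations.DEFAULT_MAX_DETERMINIZED_STATES);
        CharacterRunAutomaton run = new CharacterRunAutomaton(a);
        System.out.println(run.run("quick")); // true
        System.out.println(run.run("slow"));  // false
    }
}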
Example 1: toLookupAutomaton
import org.apache.lucene.util.automaton.Operations; // import the required package/class
final Automaton toLookupAutomaton(final CharSequence key) throws IOException {
    // TODO: is there a Reader from a CharSequence?
    // Turn tokenstream into automaton:
    Automaton automaton = null;
    try (TokenStream ts = queryAnalyzer.tokenStream("", key.toString())) {
        automaton = getTokenStreamToAutomaton().toAutomaton(ts);
    }
    automaton = replaceSep(automaton);
    // TODO: we can optimize this somewhat by determinizing
    // while we convert
    // This automaton should not blow up during determinize:
    automaton = Operations.determinize(automaton, Integer.MAX_VALUE);
    return automaton;
}
Developer: justor, Project: elasticsearch_my, Lines: 19, Source: XAnalyzingSuggester.java
Example 2: toAutomaton
import org.apache.lucene.util.automaton.Operations; // import the required package/class
private Automaton toAutomaton() {
    Automaton a = null;
    if (include != null) {
        a = include.toAutomaton();
    } else if (includeValues != null) {
        a = Automata.makeStringUnion(includeValues);
    } else {
        a = Automata.makeAnyString();
    }
    if (exclude != null) {
        a = Operations.minus(a, exclude.toAutomaton(), Operations.DEFAULT_MAX_DETERMINIZED_STATES);
    } else if (excludeValues != null) {
        a = Operations.minus(a, Automata.makeStringUnion(excludeValues), Operations.DEFAULT_MAX_DETERMINIZED_STATES);
    }
    return a;
}
Developer: justor, Project: elasticsearch_my, Lines: 17, Source: IncludeExclude.java
Example 3: testFuzzyQueryType
import org.apache.lucene.util.automaton.Operations; // import the required package/class
public void testFuzzyQueryType() throws Exception {
    String mapping = jsonBuilder().startObject().startObject("type1")
            .startObject("properties").startObject("completion")
            .field("type", "completion")
            .endObject().endObject()
            .endObject().endObject().string();
    DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
    FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion");
    CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
    Query prefixQuery = completionFieldMapper.fieldType().fuzzyQuery("co",
            Fuzziness.fromEdits(FuzzyCompletionQuery.DEFAULT_MAX_EDITS), FuzzyCompletionQuery.DEFAULT_NON_FUZZY_PREFIX,
            FuzzyCompletionQuery.DEFAULT_MIN_FUZZY_LENGTH, Operations.DEFAULT_MAX_DETERMINIZED_STATES,
            FuzzyCompletionQuery.DEFAULT_TRANSPOSITIONS, FuzzyCompletionQuery.DEFAULT_UNICODE_AWARE);
    assertThat(prefixQuery, instanceOf(FuzzyCompletionQuery.class));
}
Developer: justor, Project: elasticsearch_my, Lines: 17, Source: CompletionFieldMapperTests.java
Example 4: toAutomaton
import org.apache.lucene.util.automaton.Operations; // import the required package/class
/**
 * Create an automaton for a given context query. This automaton will be used
 * to find the matching paths with the FST.
 *
 * @param preserveSep set an additional char (<code>XAnalyzingSuggester.SEP_LABEL</code>) between each context query
 * @param queries list of {@link ContextQuery} defining the lookup context
 *
 * @return Automaton matching the given Query
 */
public static Automaton toAutomaton(boolean preserveSep, Iterable<ContextQuery> queries) {
    Automaton a = Automata.makeEmptyString();
    Automaton gap = Automata.makeChar(ContextMapping.SEPARATOR);
    if (preserveSep) {
        // if separators are preserved the fst contains a SEP_LABEL
        // behind each gap. To have a matching automaton, we need to
        // include the SEP_LABEL in the query as well
        gap = Operations.concatenate(gap, Automata.makeChar(XAnalyzingSuggester.SEP_LABEL));
    }
    for (ContextQuery query : queries) {
        a = Operations.concatenate(Arrays.asList(query.toAutomaton(), gap, a));
    }
    // TODO: should we limit this? Do any of our ContextQuery impls really create exponential regexps? GeoQuery looks safe (union
    // of strings).
    return Operations.determinize(a, Integer.MAX_VALUE);
}
Developer: baidu, Project: Elasticsearch, Lines: 29, Source: ContextMapping.java
Example 5: toLookupAutomaton
import org.apache.lucene.util.automaton.Operations; // import the required package/class
final Automaton toLookupAutomaton(final CharSequence key) throws IOException {
    // TODO: is there a Reader from a CharSequence?
    // Turn tokenstream into automaton:
    Automaton automaton = null;
    TokenStream ts = queryAnalyzer.tokenStream("", key.toString());
    try {
        automaton = getTokenStreamToAutomaton().toAutomaton(ts);
    } finally {
        IOUtils.closeWhileHandlingException(ts);
    }
    automaton = replaceSep(automaton);
    // TODO: we can optimize this somewhat by determinizing
    // while we convert
    automaton = Operations.determinize(automaton, DEFAULT_MAX_DETERMINIZED_STATES);
    return automaton;
}
Developer: europeana, Project: search, Lines: 19, Source: AnalyzingSuggester.java
Example 6: equals
import org.apache.lucene.util.automaton.Operations; // import the required package/class
/** Returns true iff <code>o</code> is equal to this. */
@Override
public boolean equals(Object o) {
    if (!(o instanceof TermAutomatonQuery)) {
        return false;
    }
    TermAutomatonQuery other = (TermAutomatonQuery) o;
    if (det == null) {
        throw new IllegalStateException("please call finish first");
    }
    if (other.det == null) {
        throw new IllegalStateException("please call other.finish first");
    }
    // NOTE: not quite correct, because if terms were added in different
    // order in each query but the language is the same, we return false:
    return (this.getBoost() == other.getBoost())
            && this.termToID.equals(other.termToID)
            && Operations.sameLanguage(det, other.det);
}
Developer: europeana, Project: search, Lines: 22, Source: TermAutomatonQuery.java
Example 7: setUp
import org.apache.lucene.util.automaton.Operations; // import the required package/class
@Override
public void setUp() throws Exception {
    super.setUp();
    Automaton single = new Automaton();
    int initial = single.createState();
    int accept = single.createState();
    single.setAccept(accept, true);
    // build an automaton matching this jvm's letter definition
    for (int i = 0; i <= 0x10FFFF; i++) {
        if (Character.isLetter(i)) {
            single.addTransition(initial, accept, i);
        }
    }
    Automaton repeat = Operations.repeat(single);
    jvmLetter = new CharacterRunAutomaton(repeat);
}
Developer: europeana, Project: search, Lines: 18, Source: TestDuelingAnalyzers.java
Example 8: testCustomProvider
import org.apache.lucene.util.automaton.Operations; // import the required package/class
public void testCustomProvider() throws IOException {
    AutomatonProvider myProvider = new AutomatonProvider() {
        // automaton that matches quick, brown or bob
        private Automaton quickBrownAutomaton = Operations.union(Arrays
                .asList(Automata.makeString("quick"),
                        Automata.makeString("brown"),
                        Automata.makeString("bob")));
        @Override
        public Automaton getAutomaton(String name) {
            if (name.equals("quickBrown")) return quickBrownAutomaton;
            else return null;
        }
    };
    RegexpQuery query = new RegexpQuery(newTerm("<quickBrown>"), RegExp.ALL,
            myProvider, DEFAULT_MAX_DETERMINIZED_STATES);
    assertEquals(1, searcher.search(query, 5).totalHits);
}
Developer: europeana, Project: search, Lines: 19, Source: TestRegexpQuery.java
Example 9: testAutomata
import org.apache.lucene.util.automaton.Operations; // import the required package/class
/**
 * Test some very simple automata.
 */
public void testAutomata() throws IOException {
    assertAutomatonHits(0, Automata.makeEmpty());
    assertAutomatonHits(0, Automata.makeEmptyString());
    assertAutomatonHits(2, Automata.makeAnyChar());
    assertAutomatonHits(3, Automata.makeAnyString());
    assertAutomatonHits(2, Automata.makeString("doc"));
    assertAutomatonHits(1, Automata.makeChar('a'));
    assertAutomatonHits(2, Automata.makeCharRange('a', 'b'));
    assertAutomatonHits(2, Automata.makeInterval(1233, 2346, 0));
    assertAutomatonHits(1, Automata.makeInterval(0, 2000, 0));
    assertAutomatonHits(2, Operations.union(Automata.makeChar('a'),
            Automata.makeChar('b')));
    assertAutomatonHits(0, Operations.intersection(Automata.makeChar('a'),
            Automata.makeChar('b')));
    assertAutomatonHits(1, Operations.minus(Automata.makeCharRange('a', 'b'),
            Automata.makeChar('a'), DEFAULT_MAX_DETERMINIZED_STATES));
}
Developer: europeana, Project: search, Lines: 21, Source: TestAutomatonQuery.java
Example 10: buildRemoteWhitelist
import org.apache.lucene.util.automaton.Operations; // import the required package/class
/**
 * Build the {@link CharacterRunAutomaton} that represents the reindex-from-remote whitelist and make sure that it doesn't whitelist
 * the world.
 */
static CharacterRunAutomaton buildRemoteWhitelist(List<String> whitelist) {
    if (whitelist.isEmpty()) {
        return new CharacterRunAutomaton(Automata.makeEmpty());
    }
    Automaton automaton = Regex.simpleMatchToAutomaton(whitelist.toArray(Strings.EMPTY_ARRAY));
    automaton = MinimizationOperations.minimize(automaton, Operations.DEFAULT_MAX_DETERMINIZED_STATES);
    if (Operations.isTotal(automaton)) {
        throw new IllegalArgumentException("Refusing to start because whitelist " + whitelist + " accepts all addresses. "
                + "This would allow users to reindex-from-remote any URL they like effectively having Elasticsearch make HTTP GETs "
                + "for them.");
    }
    return new CharacterRunAutomaton(automaton);
}
Developer: justor, Project: elasticsearch_my, Lines: 18, Source: TransportReindexAction.java
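The Operations.isTotal guard in the method above can be reproduced with Lucene classes alone. The sketch below is not Elasticsearch code; it is an assumed, stand-alone illustration of why a match-anything whitelist pattern is rejected: such a pattern minimizes to a total automaton.
import java.util.Arrays;
import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.MinimizationOperations;
import org.apache.lucene.util.automaton.Operations;

static boolean acceptsEverything() {
    // A "*"-style pattern reduces to "" + any-string + "", i.e. every possible string.
    Automaton all = Operations.concatenate(Arrays.asList(
            Automata.makeString(""),
            Automata.makeAnyString(),
            Automata.makeString("")));
    all = MinimizationOperations.minimize(all, Operations.DEFAULT_MAX_DETERMINIZED_STATES);
    return Operations.isTotal(all); // true: exactly the case the method above refuses to accept
}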
Example 11: convertAutomaton
import org.apache.lucene.util.automaton.Operations; // import the required package/class
@Override
protected Automaton convertAutomaton(Automaton a) {
    if (unicodeAware) {
        // FLORIAN EDIT: get converted Automaton from superclass
        Automaton utf8automaton = new UTF32ToUTF8().convert(super.convertAutomaton(a));
        // This automaton should not blow up during determinize:
        utf8automaton = Operations.determinize(utf8automaton, Integer.MAX_VALUE);
        return utf8automaton;
    } else {
        return super.convertAutomaton(a);
    }
}
Developer: justor, Project: elasticsearch_my, Lines: 13, Source: XFuzzySuggester.java
Example 12: convertAutomaton
import org.apache.lucene.util.automaton.Operations; // import the required package/class
protected Automaton convertAutomaton(Automaton a) {
    if (queryPrefix != null) {
        a = Operations.concatenate(Arrays.asList(queryPrefix, a));
        // This automaton should not blow up during determinize:
        a = Operations.determinize(a, Integer.MAX_VALUE);
    }
    return a;
}
Developer: justor, Project: elasticsearch_my, Lines: 9, Source: XAnalyzingSuggester.java
Example 13: simpleMatchToAutomaton
import org.apache.lucene.util.automaton.Operations; // import the required package/class
/** Return an {@link Automaton} that matches the given pattern. */
public static Automaton simpleMatchToAutomaton(String pattern) {
    List<Automaton> automata = new ArrayList<>();
    int previous = 0;
    for (int i = pattern.indexOf('*'); i != -1; i = pattern.indexOf('*', i + 1)) {
        automata.add(Automata.makeString(pattern.substring(previous, i)));
        automata.add(Automata.makeAnyString());
        previous = i + 1;
    }
    automata.add(Automata.makeString(pattern.substring(previous)));
    return Operations.concatenate(automata);
}
Developer: justor, Project: elasticsearch_my, Lines: 13, Source: Regex.java
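A brief usage sketch for the method above; the pattern and test strings are hypothetical, and the unqualified call assumes the sketch lives in (or statically imports from) the same Regex class. The resulting automaton can be executed with a CharacterRunAutomaton, which determinizes it internally.
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;

static void simpleMatchDemo() {
    // "fo*ar" expands to makeString("fo") + makeAnyString() + makeString("ar").
    Automaton a = simpleMatchToAutomaton("fo*ar");
    CharacterRunAutomaton run = new CharacterRunAutomaton(a); // determinizes internally
    System.out.println(run.run("foobar")); // true
    System.out.println(run.run("foo"));    // false
}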
Example 14: testRegexQueryType
import org.apache.lucene.util.automaton.Operations; // import the required package/class
public void testRegexQueryType() throws Exception {
    String mapping = jsonBuilder().startObject().startObject("type1")
            .startObject("properties").startObject("completion")
            .field("type", "completion")
            .endObject().endObject()
            .endObject().endObject().string();
    DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
    FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion");
    CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
    Query prefixQuery = completionFieldMapper.fieldType()
            .regexpQuery(new BytesRef("co"), RegExp.ALL, Operations.DEFAULT_MAX_DETERMINIZED_STATES);
    assertThat(prefixQuery, instanceOf(RegexCompletionQuery.class));
}
Developer: justor, Project: elasticsearch_my, Lines: 15, Source: CompletionFieldMapperTests.java
Example 15: toAutomaton
import org.apache.lucene.util.automaton.Operations; // import the required package/class
@Override
public Automaton toAutomaton() {
    List<Automaton> automatons = new ArrayList<>();
    for (CharSequence value : values) {
        automatons.add(Automata.makeString(value.toString()));
    }
    return Operations.union(automatons);
}
Developer: baidu, Project: Elasticsearch, Lines: 9, Source: CategoryContextMapping.java
Example 16: toAutomaton
import org.apache.lucene.util.automaton.Operations; // import the required package/class
@Override
public Automaton toAutomaton() {
    Automaton automaton;
    if (precisions == null || precisions.length == 0) {
        automaton = Automata.makeString(location);
    } else {
        automaton = Automata.makeString(location.substring(0, Math.max(1, Math.min(location.length(), precisions[0]))));
        for (int i = 1; i < precisions.length; i++) {
            final String cell = location.substring(0, Math.max(1, Math.min(location.length(), precisions[i])));
            automaton = Operations.union(automaton, Automata.makeString(cell));
        }
    }
    return automaton;
}
Developer: baidu, Project: Elasticsearch, Lines: 15, Source: GeolocationContextMapping.java
Example 17: convertAutomaton
import org.apache.lucene.util.automaton.Operations; // import the required package/class
@Override
protected Automaton convertAutomaton(Automaton a) {
    if (unicodeAware) {
        Automaton utf8automaton = new UTF32ToUTF8().convert(a);
        utf8automaton = Operations.determinize(utf8automaton, DEFAULT_MAX_DETERMINIZED_STATES);
        return utf8automaton;
    } else {
        return a;
    }
}
Developer: europeana, Project: search, Lines: 11, Source: FuzzySuggester.java
Example 18: toFiniteStrings
import org.apache.lucene.util.automaton.Operations; // import the required package/class
final Set<IntsRef> toFiniteStrings(final BytesRef surfaceForm, final TokenStreamToAutomaton ts2a) throws IOException {
    // Analyze surface form:
    Automaton automaton = null;
    TokenStream ts = indexAnalyzer.tokenStream("", surfaceForm.utf8ToString());
    try {
        // Create corresponding automaton: labels are bytes
        // from each analyzed token, with byte 0 used as
        // separator between tokens:
        automaton = ts2a.toAutomaton(ts);
    } finally {
        IOUtils.closeWhileHandlingException(ts);
    }
    automaton = replaceSep(automaton);
    automaton = convertAutomaton(automaton);
    // TODO: LUCENE-5660 re-enable this once we disallow massive suggestion strings
    // assert SpecialOperations.isFinite(automaton);
    // Get all paths from the automaton (there can be
    // more than one path, eg if the analyzer created a
    // graph using SynFilter or WDF):
    // TODO: we could walk & add simultaneously, so we
    // don't have to alloc [possibly biggish]
    // intermediate HashSet in RAM:
    return Operations.getFiniteStrings(automaton, maxGraphExpansions);
}
Developer: europeana, Project: search, Lines: 31, Source: AnalyzingSuggester.java
Example 19: testNFA
import org.apache.lucene.util.automaton.Operations; // import the required package/class
/**
 * Test that a nondeterministic automaton works correctly. (It will be
 * determinized.)
 */
public void testNFA() throws IOException {
    // accept this or three, the union is an NFA (two transitions for 't' from
    // initial state)
    Automaton nfa = Operations.union(Automata.makeString("this"),
            Automata.makeString("three"));
    assertAutomatonHits(2, nfa);
}
Developer: europeana, Project: search, Lines: 12, Source: TestAutomatonQuery.java
Example 20: testEquals
import org.apache.lucene.util.automaton.Operations; // import the required package/class
public void testEquals() {
    AutomatonQuery a1 = new AutomatonQuery(newTerm("foobar"), Automata
            .makeString("foobar"));
    // reference to a1
    AutomatonQuery a2 = a1;
    // same as a1 (accepts the same language, same term)
    AutomatonQuery a3 = new AutomatonQuery(newTerm("foobar"),
            Operations.concatenate(
                    Automata.makeString("foo"),
                    Automata.makeString("bar")));
    // different than a1 (same term, but different language)
    AutomatonQuery a4 = new AutomatonQuery(newTerm("foobar"),
            Automata.makeString("different"));
    // different than a1 (different term, same language)
    AutomatonQuery a5 = new AutomatonQuery(newTerm("blah"),
            Automata.makeString("foobar"));
    assertEquals(a1.hashCode(), a2.hashCode());
    assertEquals(a1, a2);
    assertEquals(a1.hashCode(), a3.hashCode());
    assertEquals(a1, a3);
    // different class
    AutomatonQuery w1 = new WildcardQuery(newTerm("foobar"));
    // different class
    AutomatonQuery w2 = new RegexpQuery(newTerm("foobar"));
    assertFalse(a1.equals(w1));
    assertFalse(a1.equals(w2));
    assertFalse(w1.equals(w2));
    assertFalse(a1.equals(a4));
    assertFalse(a1.equals(a5));
    assertFalse(a1.equals(null));
}
Developer: europeana, Project: search, Lines: 36, Source: TestAutomatonQuery.java
Note: The org.apache.lucene.util.automaton.Operations examples in this article were compiled from source-code and documentation hosting platforms such as GitHub/MSDocs. The snippets come from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution and use are subject to the license of the corresponding project. Do not republish without permission.