本文整理汇总了C#中Lucene.Net.Analysis.Analyzer类的典型用法代码示例。如果您正苦于以下问题:C# Analyzer类的具体用法?C# Analyzer怎么用?C# Analyzer使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
Analyzer类属于Lucene.Net.Analysis命名空间,在下文中一共展示了Analyzer类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C#代码示例。
示例1: LuceneTesterBase
// Base test helper: stores the Lucene collaborators shared by derived testers.
// Debug output stays off unless a test explicitly enables it.
public LuceneTesterBase(LuceneDirectory directory, LuceneAnalyzer analyzer, LuceneVersion version)
{
    IndexDirectory = directory;
    Analyzer = analyzer;
    CurrentLuceneVersion = version;
    Debug = false;
}
开发者ID:joshball,项目名称:Lucene.In.Action.NET,代码行数:7,代码来源:LuceneTesterBase.cs
示例2: SetAnalyzerType
/// <summary>
/// Builds the analyzer for this document: a default analyzer (StandardAnalyzer
/// when no type is supplied) wrapped per-field so individual fields can
/// override it with their own analyzer type.
/// </summary>
/// <param name="defaultType">Analyzer type to instantiate as the default; may be null.</param>
/// <param name="fields">Field descriptors that may carry per-field analyzer types; may be null.</param>
void SetAnalyzerType(Type defaultType, IEnumerable<FieldDetails> fields)
{
    var analyzerType = defaultType ?? typeof(StandardAnalyzer);

    // Instantiate the default analyzer; a non-Analyzer type is a caller error.
    _defaultAnalyzer = Activator.CreateInstance(analyzerType) as Analyzer;
    if (_defaultAnalyzer == null)
    {
        throw new ArgumentException("defaultType is not an Analyzer type");
    }

    var wrapper = new PerFieldAnalyzerWrapper(_defaultAnalyzer);
    if (fields != null)
    {
        foreach (var fieldDetails in fields)
        {
            if (fieldDetails.Field.Analyzer == null)
            {
                continue;
            }
            var fieldAnalyzer = CreateAnalyzerFromType(fieldDetails.Field.Analyzer);
            if (fieldAnalyzer != null)
            {
                wrapper.AddAnalyzer(fieldDetails.Name, fieldAnalyzer);
            }
        }
    }
    Analyzer = wrapper;
}
开发者ID:Toolate,项目名称:dotSearch,代码行数:26,代码来源:DocumentDetails.cs
示例3: CreateSearchIndex
/// <summary>
/// Builds the in-memory search index from the service's PoIs and fills the
/// id -> content lookup table used to resolve search hits back to their source.
/// </summary>
public void CreateSearchIndex()
{
    directory = new RAMDirectory();
    analyzer = new StandardAnalyzer(Version.LUCENE_30);
    LookupTable = new Dictionary<string, BaseContent>();
    // Fix: the original leaked the IndexWriter after Commit(); dispose it so
    // its resources are released even if indexing throws.
    using (var ixw = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED))
    {
        foreach (BaseContent p in Service.PoIs.ToList())
        {
            var document = new Document();
            // Id is stored verbatim (not indexed) so it can be read back from hits.
            document.Add(new Field("id", p.Id.ToString(), Field.Store.YES, Field.Index.NO, Field.TermVector.NO));
            string all = p.Name + " ";
            foreach (MetaInfo mi in p.EffectiveMetaInfo)
            {
                string value;
                // Only plain-text meta fields that actually have a label value are indexed.
                if (mi.Type != MetaTypes.text || !p.Labels.TryGetValue(mi.Label, out value)) continue;
                document.Add(new Field(mi.Label, value, Field.Store.YES, Field.Index.ANALYZED));
                all += value + " ";
            }
            // "All" aggregates every text value so queries can hit a single field.
            document.Add(new Field("All", all, Field.Store.YES, Field.Index.ANALYZED));
            LookupTable[p.Id.ToString()] = p;
            ixw.AddDocument(document);
        }
        ixw.Commit();
    }
}
开发者ID:TNOCS,项目名称:csTouch,代码行数:25,代码来源:LuceneSearch.cs
示例4: PreProcessUntokenizedTerms
/// <summary>
/// Detects untokenized fields and sets as NotAnalyzed in analyzer
/// </summary>
/// <param name="analyzer">Per-field wrapper to register keyword fields on.</param>
/// <param name="query">Raw query text, possibly containing "[[...]]" markers.</param>
/// <param name="keywordAnlyzer">Analyzer that leaves field values untokenized.</param>
/// <returns>The query with the "[[" / "]]" markers stripped.</returns>
private static string PreProcessUntokenizedTerms(PerFieldAnalyzerWrapper analyzer, string query, Analyzer keywordAnlyzer)
{
    var untokenizedMatches = untokenizedQuery.Matches(query);
    if (untokenizedMatches.Count < 1)
    {
        return query;
    }

    var rewritten = new StringBuilder(query);
    // KeywordAnalyzer will not tokenize the values. Walk the matches
    // back-to-front so that earlier match indexes stay valid while we edit.
    for (int i = untokenizedMatches.Count - 1; i >= 0; i--)
    {
        Match match = untokenizedMatches[i];
        // Route this field through the keyword analyzer (no tokenization).
        analyzer.AddAnalyzer(match.Groups[1].Value, keywordAnlyzer);
        Group term = match.Groups[2];
        // Strip the enclosing "[[" and "]]" from the term value, trailing pair first.
        rewritten.Remove(term.Index + term.Length - 2, 2);
        rewritten.Remove(term.Index, 2);
    }
    return rewritten.ToString();
}
开发者ID:jlundstocholm,项目名称:ravendb,代码行数:32,代码来源:QueryBuilder.cs
示例5: LuceneSearcher
/// <summary>
/// Creates a searcher over the "Index" sub-folder of the supplied working folder.
/// </summary>
/// <param name="workingFolder">Root folder; the index lives in its "Index" child.</param>
/// <param name="analyzer">Analyzer handed to the base searcher.</param>
public LuceneSearcher(DirectoryInfo workingFolder, Analyzer analyzer)
    : base(analyzer)
{
    // Guard added for consistency with the IndexWriter overload: fail fast
    // with a clear exception instead of a NullReferenceException below.
    if (workingFolder == null) throw new ArgumentNullException("workingFolder");
    _disposer = new DisposableSearcher(this);
    LuceneIndexFolder = new DirectoryInfo(Path.Combine(workingFolder.FullName, "Index"));
    InitializeDirectory();
}
开发者ID:jclementson,项目名称:Examine,代码行数:7,代码来源:LuceneSearcher.cs
示例6: LuceneSearcher
/// <summary>
/// Creates a searcher bound to an existing near-real-time IndexWriter.
/// </summary>
/// <param name="writer">Writer whose index this searcher reads; must not be null.</param>
/// <param name="analyzer">Analyzer handed to the base searcher.</param>
public LuceneSearcher(IndexWriter writer, Analyzer analyzer)
    : base(analyzer)
{
    if (writer == null)
    {
        throw new ArgumentNullException("writer");
    }
    _nrtWriter = writer;
    _disposer = new DisposableSearcher(this);
}
开发者ID:snowattitudes,项目名称:Examine,代码行数:7,代码来源:LuceneSearcher.cs
示例7: TestFarsiRangeFilterCollating
/// <summary>
/// Verifies collation-aware range filtering for Farsi text: with the supplied
/// collation analyzer, a TermRangeFilter over the first begin/end pair must
/// exclude the single indexed term, while the second pair must include it.
/// </summary>
/// <param name="analyzer">Collation analyzer under test.</param>
/// <param name="firstBeg">Start of the range expected to exclude the indexed term.</param>
/// <param name="firstEnd">End of the range expected to exclude the indexed term.</param>
/// <param name="secondBeg">Start of the range expected to include the indexed term.</param>
/// <param name="secondEnd">End of the range expected to include the indexed term.</param>
public virtual void TestFarsiRangeFilterCollating(Analyzer analyzer, BytesRef firstBeg, BytesRef firstEnd, BytesRef secondBeg, BytesRef secondEnd)
{
    Directory dir = NewDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
    Document doc = new Document();
    // One Farsi term in "content" plus a constant "body" term to query against.
    doc.Add(new TextField("content", "\u0633\u0627\u0628", Field.Store.YES));
    doc.Add(new StringField("body", "body", Field.Store.YES));
    writer.AddDocument(doc);
    writer.Dispose();
    IndexReader reader = DirectoryReader.Open(dir);
    IndexSearcher searcher = new IndexSearcher(reader);
    Query query = new TermQuery(new Term("body", "body"));
    // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
    // orders the U+0698 character before the U+0633 character, so the single
    // index Term below should NOT be returned by a TermRangeFilter with a Farsi
    // Collator (or an Arabic one for the case when Farsi searcher not
    // supported).
    ScoreDoc[] result = searcher.Search(query, new TermRangeFilter("content", firstBeg, firstEnd, true, true), 1).ScoreDocs;
    Assert.AreEqual(0, result.Length, "The index Term should not be included.");
    result = searcher.Search(query, new TermRangeFilter("content", secondBeg, secondEnd, true, true), 1).ScoreDocs;
    Assert.AreEqual(1, result.Length, "The index Term should be included.");
    reader.Dispose();
    dir.Dispose();
}
开发者ID:joyanta,项目名称:lucene.net,代码行数:27,代码来源:CollationTestBase.cs
示例8: ObjectQueryTranslator
/// <summary>
/// Translator seeded with the field names and analyzer used when a query does
/// not specify its own, rooted at an empty boolean query.
/// </summary>
public ObjectQueryTranslator(string[] defaultFieldNames, Analyzer defaultAnalyzer)
{
    _root = new BooleanQuery();
    _defaultFieldNames = defaultFieldNames;
    _defaultAnalyzer = defaultAnalyzer;
}
开发者ID:Toolate,项目名称:dotSearch,代码行数:7,代码来源:ObjectQueryTranslator.cs
示例9: CreateIndex
/// <summary>
/// Indexes every matching text file under the configured folder, storing the
/// path and name and indexing (without storing) each file's contents.
/// </summary>
/// <param name="analayer">Analyzer used to tokenize the name and content fields.</param>
public void CreateIndex(Analyzer analayer)
{
    string[] files = System.IO.Directory.GetFiles(_textFilesFolder, Config.FileSearchPattern, SearchOption.AllDirectories);
    // Fix: the original never disposed the FSDirectory and leaked the writer
    // if an exception occurred mid-indexing; using blocks release both.
    using (FSDirectory fsDir = new SimpleFSDirectory(new DirectoryInfo(_indexerFolder)))
    using (IndexWriter indexWriter = new IndexWriter(fsDir, analayer, true, Lucene.Net.Index.IndexWriter.MaxFieldLength.UNLIMITED))
    {
        foreach (string file in files)
        {
            string name = new FileInfo(file).Name;
            string content = File.ReadAllText(file);
            Document doc = new Document();
            // Path is a key: stored verbatim, never tokenized.
            doc.Add(new Field(Config.Field_Path, file, Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new Field(Config.Field_Name, name, Field.Store.YES, Field.Index.ANALYZED));
            // Content is searchable but not stored, keeping the index small.
            doc.Add(new Field(Config.Field_Content, content, Field.Store.NO, Field.Index.ANALYZED));
            indexWriter.AddDocument(doc);
            Console.WriteLine("{0} - {1}", file, name);
        }
        indexWriter.Optimize();
    }
    Console.WriteLine("File count: {0}", files.Length);
}
开发者ID:NDChen,项目名称:MyDemoCode,代码行数:26,代码来源:IndexHelper.cs
示例10: InstancePerFieldAnalyzerWrapper
/// <summary>
/// Builds the shared per-field analyzer: synonym analysis by default, with the
/// "cota" and "codigo" fields treated as single keyword tokens.
/// </summary>
public InstancePerFieldAnalyzerWrapper()
{
    var synonymAnalyzer = new Synonyms.SynonymAnalyzer(new Synonyms.XmlSynonymEngine());
    var wrapper = new Lucene.Net.Analysis.PerFieldAnalyzerWrapper(synonymAnalyzer);
    wrapper.AddAnalyzer("cota", new Lucene.Net.Analysis.KeywordAnalyzer());
    wrapper.AddAnalyzer("codigo", new Lucene.Net.Analysis.KeywordAnalyzer());
    instancePerFieldAnalyzerWrapper = wrapper;
}
开发者ID:aureliopires,项目名称:gisa,代码行数:7,代码来源:NivelDocumentalSearcher.cs
示例11: GetParser
/// <summary>
/// Creates a classic QueryParser over the default field, substituting a simple
/// MockAnalyzer when none is supplied; OR is the default operator.
/// </summary>
/// <param name="a">Analyzer for the parser, or null for the mock default.</param>
public virtual QueryParser GetParser(Analyzer a)
{
    var analyzer = a ?? new MockAnalyzer(Random(), MockTokenizer.SIMPLE, true);
    var parser = new QueryParser(TEST_VERSION_CURRENT, DefaultField, analyzer);
    parser.DefaultOperator = QueryParserBase.OR_OPERATOR;
    return parser;
}
开发者ID:ChristopherHaws,项目名称:lucenenet,代码行数:7,代码来源:TestQueryParser.cs
示例12: WriteIndex
/// <summary>
/// Builds (create == true) or incrementally updates (create == false) a
/// database-backed index and returns the elapsed indexing time.
/// </summary>
/// <param name="analyzer">Analyzer used for the indexed fields.</param>
/// <param name="indexer">Tuning knobs: max field length, RAM buffer, merge factor, buffered docs.</param>
/// <param name="source">Database source: connection string, query and primary key.</param>
/// <param name="create">True to rebuild the index from scratch, false to append.</param>
/// <returns>Wall-clock duration of the write.</returns>
public static TimeSpan WriteIndex(Analyzer analyzer, IndexerSet indexer, Source source, bool create)
{
    // Fix: the original wrapped this body in try { ... } catch (Exception e)
    // { throw e; }, which only destroyed the stack trace; it has been removed.
    string connect = source.GetConnString();
    DateTime start;
    if (create)
    {
        DBCreateIndexer dbcIndexer = new DBCreateIndexer(analyzer, source.DBType, connect, index.Path, index.Caption);
        dbcIndexer.PrimaryKey = source.PrimaryKey;
        start = DateTime.Now;
        dbcIndexer.WriteResults(source.Query, indexer.MaxFieldLength, indexer.RamBufferSize, indexer.MergeFactor, indexer.MaxBufferedDocs);
        return DateTime.Now - start;
    }
    else
    {
        DBIncremIndexer dbiIndexer = new DBIncremIndexer(analyzer, source.DBType, connect, index.Path, index.Caption);
        dbiIndexer.PrimaryKey = source.PrimaryKey;
        start = DateTime.Now;
        dbiIndexer.WriteResults(source.Query, indexer.MaxFieldLength, indexer.RamBufferSize, indexer.MergeFactor, indexer.MaxBufferedDocs);
        return DateTime.Now - start;
    }
}
示例13: WriteIndexWithEvent
/// <summary>
/// RAM-buffered variant of WriteIndex: builds (create == true) or incrementally
/// updates a database-backed index, reporting completion and progress through
/// the supplied event handlers, and returns the elapsed indexing time.
/// </summary>
/// <param name="analyzer">Analyzer used for the indexed fields.</param>
/// <param name="indexer">Tuning knobs: max field length, RAM buffer, merge factor, buffered docs.</param>
/// <param name="source">Database source: connection string, query and primary key.</param>
/// <param name="create">True to rebuild the index from scratch, false to append.</param>
/// <param name="OnIndexCompleted">Raised when indexing finishes.</param>
/// <param name="OnProgressChanged">Raised as indexing progresses.</param>
/// <returns>Wall-clock duration of the write.</returns>
public static TimeSpan WriteIndexWithEvent(Analyzer analyzer, IndexerSet indexer, Source source, bool create,
    IndexCompletedEventHandler OnIndexCompleted,
    IndexProgressChangedEventHandler OnProgressChanged)
{
    // Fix: the original wrapped this body in try { ... } catch (Exception e)
    // { throw e; }, which only destroyed the stack trace; it has been removed.
    string connect = source.GetConnString();
    DateTime start;
    if (create)
    {
        DBRamCreateIndexer dbcIndexer = new DBRamCreateIndexer(analyzer, source.DBType, connect, index.Path, index.Caption);
        dbcIndexer.OnIndexCompleted += OnIndexCompleted;
        dbcIndexer.OnProgressChanged += OnProgressChanged;
        start = DateTime.Now;
        dbcIndexer.WriteResultsWithEvent(source.Query, indexer.MaxFieldLength, indexer.RamBufferSize, indexer.MergeFactor, indexer.MaxBufferedDocs);
        return DateTime.Now - start;
    }
    else
    {
        DBRamIncremIndexer dbiIndexer = new DBRamIncremIndexer(analyzer, source.DBType, connect, index.Path, index.Caption);
        dbiIndexer.OnIndexCompleted += OnIndexCompleted;
        dbiIndexer.OnProgressChanged += OnProgressChanged;
        start = DateTime.Now;
        dbiIndexer.WriteResultsWithEvent(source.Query, indexer.MaxFieldLength, indexer.RamBufferSize, indexer.MergeFactor, indexer.MaxBufferedDocs);
        return DateTime.Now - start;
    }
}
示例14: LuceneSearcher
/// <summary>
/// Creates a searcher over an explicit Lucene directory; no on-disk index
/// folder is tracked in this mode.
/// </summary>
public LuceneSearcher(Lucene.Net.Store.Directory luceneDirectory, Analyzer analyzer)
    : base(analyzer)
{
    _luceneDirectory = luceneDirectory;
    LuceneIndexFolder = null;
    _disposer = new DisposableSearcher(this);
}
开发者ID:bowserm,项目名称:Examine,代码行数:7,代码来源:LuceneSearcher.cs
示例15: InitSearchServiceAnalyzer
/// <summary>
/// Installs a per-field analyzer into the indexing service via reflection:
/// identifier fields match as exact keywords, system fields split on
/// whitespace only, and free-text fields use the supplied text analyzer.
/// </summary>
/// <param name="indexingServiceSettingsType">Type holding the private static "_analyzer" field.</param>
/// <param name="defaultAnalyzer">Analyzer for any field without an override.</param>
/// <param name="textAnalyzer">Analyzer for the human-readable text fields.</param>
private void InitSearchServiceAnalyzer(Type indexingServiceSettingsType, Analyzer defaultAnalyzer, Analyzer textAnalyzer)
{
    var perFieldAnalyzerWrapper = new PerFieldAnalyzerWrapper(defaultAnalyzer);

    // Identifier-like fields must match as a single exact token.
    foreach (var field in new[]
    {
        "EPISERVER_SEARCH_ID", "EPISERVER_SEARCH_CULTURE",
        "EPISERVER_SEARCH_REFERENCEID", "EPISERVER_SEARCH_AUTHORSTORAGE"
    })
    {
        perFieldAnalyzerWrapper.AddAnalyzer(field, new KeywordAnalyzer());
    }

    // Multi-valued/system fields are split on whitespace only.
    foreach (var field in new[]
    {
        "EPISERVER_SEARCH_CATEGORIES", "EPISERVER_SEARCH_ACL", "EPISERVER_SEARCH_VIRTUALPATH",
        "EPISERVER_SEARCH_TYPE", "EPISERVER_SEARCH_CREATED", "EPISERVER_SEARCH_MODIFIED",
        "EPISERVER_SEARCH_PUBLICATIONEND", "EPISERVER_SEARCH_PUBLICATIONSTART", "EPISERVER_SEARCH_ITEMSTATUS"
    })
    {
        perFieldAnalyzerWrapper.AddAnalyzer(field, new WhitespaceAnalyzer());
    }

    // Human-readable text fields get full linguistic analysis.
    foreach (var field in new[]
    {
        "EPISERVER_SEARCH_TITLE", "EPISERVER_SEARCH_DISPLAYTEXT",
        "EPISERVER_SEARCH_AUTHORS", "EPISERVER_SEARCH_DEFAULT"
    })
    {
        perFieldAnalyzerWrapper.AddAnalyzer(field, textAnalyzer);
    }

    // Fix: fail with a clear message (rather than a NullReferenceException)
    // if the private static field has been renamed or removed.
    var analyzerField = indexingServiceSettingsType.GetField("_analyzer", BindingFlags.Static | BindingFlags.NonPublic);
    if (analyzerField == null)
    {
        throw new InvalidOperationException("Field '_analyzer' was not found on " + indexingServiceSettingsType.FullName);
    }
    analyzerField.SetValue(null, perFieldAnalyzerWrapper);
}
开发者ID:javafun,项目名称:EPiSearch,代码行数:26,代码来源:SearchCustomizer.cs
示例16: GetUmbracoIndexer
/// <summary>
/// Creates an Umbraco content indexer over the given Lucene directory,
/// substituting test defaults for the data service and analyzer when omitted.
/// </summary>
/// <param name="luceneDir">Directory the index is written to.</param>
/// <param name="analyzer">Analyzer to use; defaults to StandardAnalyzer (Lucene 2.9).</param>
/// <param name="dataService">Data service to index from; defaults to the test service.</param>
public static UmbracoContentIndexer GetUmbracoIndexer(
    Lucene.Net.Store.Directory luceneDir,
    Analyzer analyzer = null,
    IDataService dataService = null)
{
    dataService = dataService ?? new TestDataService();
    analyzer = analyzer ?? new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_29);

    var indexSet = new IndexSet();
    var indexCriteria = indexSet.ToIndexCriteria(dataService, UmbracoContentIndexer.IndexFieldPolicies);

    var indexer = new UmbracoContentIndexer(
        indexCriteria,
        luceneDir, // custom lucene directory
        dataService,
        analyzer,
        false);
    indexer.IndexingError += IndexingError;
    return indexer;
}
示例17: Init
/// <summary>
/// Test fixture setup: indexes six small "prop=val" documents in RAM, then
/// opens a BoboIndexReader with an attributes facet handler and a browser
/// over it for the tests to query.
/// </summary>
public void Init()
{
    facetHandlers = new List<IFacetHandler>();
    directory = new RAMDirectory();
    analyzer = new WhitespaceAnalyzer();
    selectionProperties = new Dictionary<string, string>();
    // Fix: the original leaked the IndexWriter; dispose it once the documents
    // are committed.
    using (var writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED))
    {
        writer.AddDocument(Doc("prop1=val1", "prop2=val1", "prop5=val1"));
        writer.AddDocument(Doc("prop1=val2", "prop3=val1", "prop7=val7"));
        writer.AddDocument(Doc("prop1=val2", "prop3=val2", "prop3=val3"));
        writer.AddDocument(Doc("prop1=val1", "prop2=val1"));
        writer.AddDocument(Doc("prop1=val1", "prop2=val1"));
        writer.AddDocument(Doc("prop1=val1", "prop2=val1", "prop4=val2", "prop4=val3"));
        writer.Commit();
    }
    attributesFacetHandler = new AttributesFacetHandler(AttributeHandlerName, AttributeHandlerName, null, null,
        new Dictionary<string, string>());
    facetHandlers.Add(attributesFacetHandler);
    IndexReader reader = IndexReader.Open(directory, true);
    boboReader = BoboIndexReader.GetInstance(reader, facetHandlers);
    attributesFacetHandler.LoadFacetData(boboReader);
    browser = new BoboBrowser(boboReader);
}
开发者ID:yao-yi,项目名称:BoboBrowse.Net,代码行数:25,代码来源:AttributesFacetHandlerTest.cs
示例18: Process
/// <summary>
/// Runs the base analysis pass only when an analyzer is present in the query
/// configuration; otherwise the tree is returned untouched.
/// </summary>
/// <param name="queryTree">Query node tree to process.</param>
public override IQueryNode Process(IQueryNode queryTree)
{
    // Renamed from "analyzer" to avoid shadowing the field of the same name.
    var configuredAnalyzer = GetQueryConfigHandler().Get(ConfigurationKeys.ANALYZER);
    if (configuredAnalyzer == null)
    {
        return queryTree;
    }

    this.analyzer = configuredAnalyzer;

    // Position increments default to off unless explicitly configured.
    this.positionIncrementsEnabled = false;
    bool? configuredIncrements = GetQueryConfigHandler().Get(ConfigurationKeys.ENABLE_POSITION_INCREMENTS);

    var configuredOperator = GetQueryConfigHandler().Get(ConfigurationKeys.DEFAULT_OPERATOR);
    this.defaultOperator = configuredOperator != null ? configuredOperator.Value : Operator.OR;

    if (configuredIncrements != null)
    {
        this.positionIncrementsEnabled = configuredIncrements.Value;
    }
    if (this.analyzer != null)
    {
        return base.Process(queryTree);
    }
    return queryTree;
}
开发者ID:apache,项目名称:lucenenet,代码行数:25,代码来源:AnalyzerQueryNodeProcessor.cs
示例19: FormSimilarQuery
/// <summary> Simple similarity query generator.
/// Takes every unique word and forms a boolean query where all words are optional.
/// After you get this you'll use it to query your <see cref="IndexSearcher"/> for similar docs.
/// The only caveat is the first hit returned <b>should be</b> your source document - you'll
/// need to then ignore that.
///
/// <p/>
///
/// So, if you have a code fragment like this:
/// <br/>
/// <code>
/// Query q = formSimilaryQuery( "I use Lucene to search fast. Fast searchers are good", new StandardAnalyzer(), "contents", null);
/// </code>
///
/// <p/>
///
/// The query returned, in string form, will be <c>'(i use lucene to search fast searchers are good')</c>.
///
/// <p/>
/// The philosophy behind this method is "two documents are similar if they share lots of words".
/// Note that behind the scenes, Lucene's scoring algorithm will tend to give two documents a higher similarity score if they share more uncommon words.
///
/// <P/>
/// This method is fail-safe in that if a long 'body' is passed in and
/// <see cref="BooleanQuery.Add"/> (used internally)
/// throws
/// <see cref="BooleanQuery.TooManyClauses"/>, the
/// query as it is will be returned.
/// </summary>
/// <param name="body">the body of the document you want to find similar documents to
/// </param>
/// <param name="a">the analyzer to use to parse the body
/// </param>
/// <param name="field">the field you want to search on, probably something like "contents" or "body"
/// </param>
/// <param name="stop">optional set of stop words to ignore
/// </param>
/// <returns> a query with all unique words in 'body'
/// </returns>
/// <throws> IOException this can't happen... </throws>
public static Query FormSimilarQuery(System.String body, Analyzer a, System.String field, ISet<string> stop)
{
    BooleanQuery tmp = new BooleanQuery();
    ISet<string> already = Lucene.Net.Support.Compatibility.SetFactory.CreateHashSet<string>(); // ignore dups
    // Fix: dispose the TokenStream when done — the original never released it.
    using (TokenStream ts = a.TokenStream(field, new System.IO.StringReader(body)))
    {
        ITermAttribute termAtt = ts.AddAttribute<ITermAttribute>();
        while (ts.IncrementToken())
        {
            String word = termAtt.Term;
            // ignore opt stop words
            if (stop != null && stop.Contains(word))
                continue;
            // ignore dups
            if (already.Contains(word))
                continue;
            already.Add(word);
            // add to query
            TermQuery tq = new TermQuery(new Term(field, word));
            try
            {
                tmp.Add(tq, Occur.SHOULD);
            }
            catch (BooleanQuery.TooManyClauses)
            {
                // fail-safe, just return what we have, not the end of the world
                break;
            }
        }
    }
    return tmp;
}
开发者ID:raol,项目名称:lucene.net,代码行数:71,代码来源:SimilarityQueries.cs
示例20: CoreParser
/// <summary>
/// Wires up the XML query parser: registers every filter, query and span
/// builder this parser understands, keyed by its XML element name.
/// </summary>
/// <param name="defaultField">Field used by "UserQuery" elements when no QueryParser is supplied.</param>
/// <param name="analyzer">Analyzer shared by the term-producing builders.</param>
/// <param name="parser">Optional classic QueryParser backing "UserQuery" elements.</param>
protected CoreParser(string defaultField, Analyzer analyzer, QueryParser parser)
{
    this.analyzer = analyzer;
    this.parser = parser;

    // Filter builders.
    filterFactory = new FilterBuilderFactory();
    filterFactory.AddBuilder("RangeFilter", new RangeFilterBuilder());
    filterFactory.AddBuilder("NumericRangeFilter", new NumericRangeFilterBuilder());

    // Core query builders.
    queryFactory = new QueryBuilderFactory();
    queryFactory.AddBuilder("TermQuery", new TermQueryBuilder());
    queryFactory.AddBuilder("TermsQuery", new TermsQueryBuilder(analyzer));
    queryFactory.AddBuilder("MatchAllDocsQuery", new MatchAllDocsQueryBuilder());
    queryFactory.AddBuilder("BooleanQuery", new BooleanQueryBuilder(queryFactory));
    queryFactory.AddBuilder("NumericRangeQuery", new NumericRangeQueryBuilder());
    queryFactory.AddBuilder("DisjunctionMaxQuery", new DisjunctionMaxQueryBuilder(queryFactory));

    // "UserQuery" delegates to the classic parser when one is provided;
    // otherwise it parses against the default field with the analyzer.
    if (parser != null)
    {
        queryFactory.AddBuilder("UserQuery", new UserInputQueryBuilder(parser));
    }
    else
    {
        queryFactory.AddBuilder("UserQuery", new UserInputQueryBuilder(defaultField, analyzer));
    }

    // Composite builders that reuse the factories assembled above.
    queryFactory.AddBuilder("FilteredQuery", new FilteredQueryBuilder(filterFactory, queryFactory));
    queryFactory.AddBuilder("ConstantScoreQuery", new ConstantScoreQueryBuilder(filterFactory));
    filterFactory.AddBuilder("CachedFilter", new CachedFilterBuilder(queryFactory,
        filterFactory, maxNumCachedFilters));

    // Span builders are registered twice: in the span-specific factory (so
    // they can nest inside other span elements) and as plain query builders.
    SpanQueryBuilderFactory spanFactory = new SpanQueryBuilderFactory();

    SpanNearBuilder spanNear = new SpanNearBuilder(spanFactory);
    spanFactory.AddBuilder("SpanNear", spanNear);
    queryFactory.AddBuilder("SpanNear", spanNear);

    BoostingTermBuilder boostingTerm = new BoostingTermBuilder();
    spanFactory.AddBuilder("BoostingTermQuery", boostingTerm);
    queryFactory.AddBuilder("BoostingTermQuery", boostingTerm);

    SpanTermBuilder spanTerm = new SpanTermBuilder();
    spanFactory.AddBuilder("SpanTerm", spanTerm);
    queryFactory.AddBuilder("SpanTerm", spanTerm);

    SpanOrBuilder spanOr = new SpanOrBuilder(spanFactory);
    spanFactory.AddBuilder("SpanOr", spanOr);
    queryFactory.AddBuilder("SpanOr", spanOr);

    SpanOrTermsBuilder spanOrTerms = new SpanOrTermsBuilder(analyzer);
    spanFactory.AddBuilder("SpanOrTerms", spanOrTerms);
    queryFactory.AddBuilder("SpanOrTerms", spanOrTerms);

    SpanFirstBuilder spanFirst = new SpanFirstBuilder(spanFactory);
    spanFactory.AddBuilder("SpanFirst", spanFirst);
    queryFactory.AddBuilder("SpanFirst", spanFirst);

    SpanNotBuilder spanNot = new SpanNotBuilder(spanFactory);
    spanFactory.AddBuilder("SpanNot", spanNot);
    queryFactory.AddBuilder("SpanNot", spanNot);
}
开发者ID:apache,项目名称:lucenenet,代码行数:59,代码来源:CoreParser.cs
注:本文中的Lucene.Net.Analysis.Analyzer类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论