This article collects typical usage examples of the C# Lucene.Net.Index.IndexWriter class. If you are wondering what IndexWriter is for, how it is used, or want to see it in real code, the selected samples below should help.
The IndexWriter class belongs to the Lucene.Net.Index namespace. Twenty code examples are shown below, ordered by popularity.
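Before the individual samples, here is a minimal sketch of the typical IndexWriter lifecycle (open, add a document, commit, dispose), written against the Lucene.Net 3.0.x API that most of the examples below use. The directory, analyzer, field name and value are illustrative assumptions, not taken from any of the examples.

using Lucene.Net.Analysis.Standard;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Version = Lucene.Net.Util.Version;

// Minimal sketch (assumed Lucene.Net 3.0.x API): open a writer over an
// in-memory directory, add one document, commit, and dispose the writer.
var directory = new RAMDirectory();                       // illustrative choice
var analyzer = new StandardAnalyzer(Version.LUCENE_30);   // illustrative choice
using (var writer = new IndexWriter(directory, analyzer, IndexWriter.MaxFieldLength.UNLIMITED))
{
    var doc = new Document();
    doc.Add(new Field("title", "hello lucene", Field.Store.YES, Field.Index.ANALYZED));
    writer.AddDocument(doc);
    writer.Commit(); // make the document visible to readers opened after this point
}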
Example 1: SetUp
public void SetUp()
{
    var writer = new IndexWriter(store, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
    var doc = new Document();
    doc.Add(new Field("aaa", "foo", Field.Store.YES, Field.Index.ANALYZED));
    writer.AddDocument(doc);
    doc = new Document();
    doc.Add(new Field("aaa", "foo", Field.Store.YES, Field.Index.ANALYZED));
    writer.AddDocument(doc);
    doc = new Document();
    doc.Add(new Field("contents", "Tom", Field.Store.YES, Field.Index.ANALYZED));
    writer.AddDocument(doc);
    doc = new Document();
    doc.Add(new Field("contents", "Jerry", Field.Store.YES, Field.Index.ANALYZED));
    writer.AddDocument(doc);
    doc = new Document();
    doc.Add(new Field("zzz", "bar", Field.Store.YES, Field.Index.ANALYZED));
    writer.AddDocument(doc);
    writer.Optimize();
    writer.Close();
}
Developer: hanabi1224, Project: lucene.net, Lines: 28, Source: TestLuceneDictionary.cs
Example 2: TestMultiValueSource
public virtual void TestMultiValueSource()
{
    Directory dir = new MockRAMDirectory();
    IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
    Document doc = new Document();
    Field f = new Field("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
    doc.Add(f);
    for (int i = 0; i < 17; i++)
    {
        f.SetValue("" + i);
        w.AddDocument(doc);
        w.Commit();
    }
    IndexReader r = w.GetReader();
    w.Close();
    Assert.IsTrue(r.GetSequentialSubReaders().Length > 1);
    ValueSource s1 = new IntFieldSource("field");
    DocValues v1 = s1.GetValues(r);
    DocValues v2 = new MultiValueSource(s1).GetValues(r);
    for (int i = 0; i < r.MaxDoc(); i++)
    {
        Assert.AreEqual(v1.IntVal(i), i);
        Assert.AreEqual(v2.IntVal(i), i);
    }
    Lucene.Net.Search.FieldCache_Fields.DEFAULT.PurgeAllCaches();
    r.Close();
    dir.Close();
}
Developer: kstenson, Project: NHibernate.Search, Lines: 35, Source: TestValueSource.cs
Example 3: MakeIndex
private static Directory MakeIndex()
{
    Directory dir = new RAMDirectory();
    try
    {
        System.Random r = new System.Random((System.Int32) (BASE_SEED + 42));
        Analyzer analyzer = new SimpleAnalyzer();
        IndexWriter writer = new IndexWriter(dir, analyzer, true);
        writer.SetUseCompoundFile(false);
        for (int d = 1; d <= NUM_DOCS; d++)
        {
            Lucene.Net.Documents.Document doc = new Lucene.Net.Documents.Document();
            for (int f = 1; f <= NUM_FIELDS; f++)
            {
                doc.Add(new Field("f" + f, data[f % data.Length] + '#' + data[r.Next(data.Length)], Field.Store.YES, Field.Index.TOKENIZED));
            }
            writer.AddDocument(doc);
        }
        writer.Close();
    }
    catch (System.Exception e)
    {
        throw new System.SystemException("", e);
    }
    return dir;
}
Developer: vikasraz, Project: indexsearchutils, Lines: 28, Source: TestLazyBug.cs
Example 4: TestReadersWriters
public void TestReadersWriters()
{
    Directory dir;
    using (dir = new RAMDirectory())
    {
        Document doc;
        IndexWriter writer;
        IndexReader reader;
        using (writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED))
        {
            Field field = new Field("name", "value", Field.Store.YES, Field.Index.ANALYZED);
            doc = new Document();
            doc.Add(field);
            writer.AddDocument(doc);
            writer.Commit();
            using (reader = writer.GetReader())
            {
                IndexReader r1 = reader.Reopen();
            }
            Assert.Throws<AlreadyClosedException>(() => reader.Reopen(), "IndexReader shouldn't be open here");
        }
        Assert.Throws<AlreadyClosedException>(() => writer.AddDocument(doc), "IndexWriter shouldn't be open here");
        Assert.IsTrue(dir.isOpen_ForNUnit, "RAMDirectory");
    }
    Assert.IsFalse(dir.isOpen_ForNUnit, "RAMDirectory");
}
Developer: hanabi1224, Project: lucene.net, Lines: 32, Source: TestIDisposable.cs
Example 5: TestSorting
public virtual void TestSorting()
{
    Directory directory = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.SetMaxBufferedDocs(2);
    writer.SetMergeFactor(1000);
    writer.AddDocument(Adoc(new System.String[]{"id", "a", "title", "ipod", "str_s", "a"}));
    writer.AddDocument(Adoc(new System.String[]{"id", "b", "title", "ipod ipod", "str_s", "b"}));
    writer.AddDocument(Adoc(new System.String[]{"id", "c", "title", "ipod ipod ipod", "str_s", "c"}));
    writer.AddDocument(Adoc(new System.String[]{"id", "x", "title", "boosted", "str_s", "x"}));
    writer.AddDocument(Adoc(new System.String[]{"id", "y", "title", "boosted boosted", "str_s", "y"}));
    writer.AddDocument(Adoc(new System.String[]{"id", "z", "title", "boosted boosted boosted", "str_s", "z"}));
    IndexReader r = writer.GetReader();
    writer.Close();
    IndexSearcher searcher = new IndexSearcher(r);
    RunTest(searcher, true);
    RunTest(searcher, false);
    searcher.Close();
    r.Close();
    directory.Close();
}
Developer: Mpdreamz, Project: lucene.net, Lines: 25, Source: TestElevationComparator.cs
Example 6: SetUp
/// <summary>
/// Set up a new index in RAM with three test phrases and the supplied Analyzer.
/// </summary>
/// <exception cref="Exception"> if an error occurs with index writer or searcher </exception>
public override void SetUp()
{
    base.SetUp();
    analyzer = new ShingleAnalyzerWrapper(new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false), 2);
    directory = NewDirectory();
    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
    Document doc;
    doc = new Document();
    doc.Add(new TextField("content", "please divide this sentence into shingles", Field.Store.YES));
    writer.AddDocument(doc);
    doc = new Document();
    doc.Add(new TextField("content", "just another test sentence", Field.Store.YES));
    writer.AddDocument(doc);
    doc = new Document();
    doc.Add(new TextField("content", "a sentence which contains no test", Field.Store.YES));
    writer.AddDocument(doc);
    writer.Dispose();
    reader = DirectoryReader.Open(directory);
    searcher = NewSearcher(reader);
}
Developer: ChristopherHaws, Project: lucenenet, Lines: 29, Source: ShingleAnalyzerWrapperTest.cs
Example 7: Proc
static void Proc()
{
    var uri = GetRandomWikiPage();
    queue.Enqueue(uri);
    using (var dir = new Lucene.Net.Store.SimpleFSDirectory(new DirectoryInfo("..\\..\\idx"))) {
        using (var indexWriter = new IndexWriter(dir, new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30), new IndexWriter.MaxFieldLength(IndexWriter.DEFAULT_MAX_FIELD_LENGTH))) {
            while (true) {
                string page;
                if (queue.TryDequeue(out page)) {
                    visited.AddOrUpdate(page, true, (p, b) => true);
                    try {
                        ProcessPage(page, indexWriter);
                    }
                    catch (Exception) {
                        Console.WriteLine("ERROR");
                    }
                    if (Console.KeyAvailable) {
                        var x = Console.ReadKey();
                        if (x.Key == ConsoleKey.Spacebar) {
                            break;
                        }
                    }
                }
                else {
                    break;
                }
            }
        }
    }
}
Developer: MarkPflug, Project: LuceneExperiment, Lines: 32, Source: Program.cs
Example 8: SetUp
public void SetUp()
{
    IndexWriter writer = new IndexWriter(store, new WhitespaceAnalyzer(), true);
    Document doc;
    doc = new Document();
    doc.Add(new Field("aaa", "foo", Field.Store.YES, Field.Index.ANALYZED));
    writer.AddDocument(doc);
    doc = new Document();
    doc.Add(new Field("aaa", "foo", Field.Store.YES, Field.Index.ANALYZED));
    writer.AddDocument(doc);
    doc = new Document();
    doc.Add(new Field("contents", "Tom", Field.Store.YES, Field.Index.ANALYZED));
    writer.AddDocument(doc);
    doc = new Document();
    doc.Add(new Field("contents", "Jerry", Field.Store.YES, Field.Index.ANALYZED));
    writer.AddDocument(doc);
    doc = new Document();
    doc.Add(new Field("zzz", "bar", Field.Store.YES, Field.Index.ANALYZED));
    writer.AddDocument(doc);
    writer.Optimize();
    writer.Close();
}
Developer: kstenson, Project: NHibernate.Search, Lines: 30, Source: TestLuceneDictionary.cs
Example 9: AddLuceneIndex
/// <summary>
/// Create an index document from the supplied field values and add it to the index.
/// </summary>
/// <param name="dic">Map of field name to field value for the new document.</param>
public void AddLuceneIndex(Dictionary<string, string> dic) {
    //var analyzer = new StandardAnalyzer(Version.LUCENE_30);
    var analyzer = GetAnalyzer();
    using (var directory = GetLuceneDirectory())
    using (var writer = new IndexWriter(directory, analyzer, IndexWriter.MaxFieldLength.UNLIMITED)) {
        var doc = new Document();
        foreach (KeyValuePair<string, string> pair in dic) {
            // add new index entry
            // Field.Store.YES: store the original value in the index;
            // only then can it be read back later with doc.Get("number").
            // Field.Index.NOT_ANALYZED: index the value without tokenizing it.
            // TODO: boost
            if (NotAnalyzeFields.Exists(one => one == pair.Key)) {
                doc.Add(new Field(pair.Key, pair.Value, Field.Store.YES, Field.Index.NOT_ANALYZED));
            }
            else {
                doc.Add(new Field(pair.Key, pair.Value, Field.Store.YES, Field.Index.ANALYZED));
            }
        }
        //doc.Boost
        writer.AddDocument(doc);
        writer.Commit();
        writer.Optimize();
        analyzer.Close();
    }
}
Developer: kangwl, Project: KANG.Frame, Lines: 30, Source: DocIndex.cs
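A hypothetical call to the helper above might look like the following. The field names and values are illustrative, the class name DocIndex is only assumed from the source file name, and GetAnalyzer / GetLuceneDirectory / NotAnalyzeFields are expected to be provided by that surrounding class as in the example.

// Hypothetical usage sketch: "id" would be indexed without tokenizing
// if NotAnalyzeFields contains "id"; the other fields are analyzed.
var indexer = new DocIndex(); // assumed class name
indexer.AddLuceneIndex(new Dictionary<string, string> {
    { "id", "42" },
    { "title", "Lucene.Net IndexWriter example" },
    { "body", "IndexWriter adds documents and commits them to the index." }
});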
Example 10: TestEmptyChildFilter
public void TestEmptyChildFilter()
{
    Directory dir = NewDirectory();
    IndexWriterConfig config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    config.SetMergePolicy(NoMergePolicy.NO_COMPOUND_FILES);
    // we don't want to merge - since we rely on certain segment setup
    IndexWriter w = new IndexWriter(dir, config);
    IList<Document> docs = new List<Document>();
    docs.Add(MakeJob("java", 2007));
    docs.Add(MakeJob("python", 2010));
    docs.Add(MakeResume("Lisa", "United Kingdom"));
    w.AddDocuments(docs);
    docs.Clear();
    docs.Add(MakeJob("ruby", 2005));
    docs.Add(MakeJob("java", 2006));
    docs.Add(MakeResume("Frank", "United States"));
    w.AddDocuments(docs);
    w.Commit();
    int num = AtLeast(10); // produce a segment that doesn't have a value in the docType field
    for (int i = 0; i < num; i++)
    {
        docs.Clear();
        docs.Add(MakeJob("java", 2007));
        w.AddDocuments(docs);
    }
    IndexReader r = DirectoryReader.Open(w, Random().NextBoolean());
    w.Dispose();
    assertTrue(r.Leaves.size() > 1);
    IndexSearcher s = new IndexSearcher(r);
    Filter parentsFilter = new FixedBitSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("docType", "resume"))));
    BooleanQuery childQuery = new BooleanQuery();
    childQuery.Add(new BooleanClause(new TermQuery(new Term("skill", "java")), BooleanClause.Occur.MUST));
    childQuery.Add(new BooleanClause(NumericRangeQuery.NewIntRange("year", 2006, 2011, true, true), BooleanClause.Occur.MUST));
    ToParentBlockJoinQuery childJoinQuery = new ToParentBlockJoinQuery(childQuery, parentsFilter, ScoreMode.Avg);
    BooleanQuery fullQuery = new BooleanQuery();
    fullQuery.Add(new BooleanClause(childJoinQuery, BooleanClause.Occur.MUST));
    fullQuery.Add(new BooleanClause(new MatchAllDocsQuery(), BooleanClause.Occur.MUST));
    ToParentBlockJoinCollector c = new ToParentBlockJoinCollector(Sort.RELEVANCE, 1, true, true);
    s.Search(fullQuery, c);
    TopGroups<int> results = c.GetTopGroups(childJoinQuery, null, 0, 10, 0, true);
    assertFalse(float.IsNaN(results.MaxScore));
    assertEquals(1, results.TotalGroupedHitCount);
    assertEquals(1, results.Groups.Length);
    IGroupDocs<int> group = results.Groups[0];
    Document childDoc = s.Doc(group.ScoreDocs[0].Doc);
    assertEquals("java", childDoc.Get("skill"));
    assertNotNull(group.GroupValue);
    Document parentDoc = s.Doc(group.GroupValue);
    assertEquals("Lisa", parentDoc.Get("name"));
    r.Dispose();
    dir.Dispose();
}
Developer: apache, Project: lucenenet, Lines: 60, Source: TestBlockJoin.cs
Example 11: AddData
private void AddData(IndexWriter writer)
{
    AddPoint(writer, "McCormick & Schmick's Seafood Restaurant", 38.9579000, -77.3572000);
    AddPoint(writer, "Jimmy's Old Town Tavern", 38.9690000, -77.3862000);
    AddPoint(writer, "Ned Devine's", 38.9510000, -77.4107000);
    AddPoint(writer, "Old Brogue Irish Pub", 38.9955000, -77.2884000);
    AddPoint(writer, "Alf Laylah Wa Laylah", 38.8956000, -77.4258000);
    AddPoint(writer, "Sully's Restaurant & Supper", 38.9003000, -77.4467000);
    AddPoint(writer, "TGI Friday", 38.8725000, -77.3829000);
    AddPoint(writer, "Potomac Swing Dance Club", 38.9027000, -77.2639000);
    AddPoint(writer, "White Tiger Restaurant", 38.9027000, -77.2638000);
    AddPoint(writer, "Jammin' Java", 38.9039000, -77.2622000);
    AddPoint(writer, "Potomac Swing Dance Club", 38.9027000, -77.2639000);
    AddPoint(writer, "WiseAcres Comedy Club", 38.9248000, -77.2344000);
    AddPoint(writer, "Glen Echo Spanish Ballroom", 38.9691000, -77.1400000);
    AddPoint(writer, "Whitlow's on Wilson", 38.8889000, -77.0926000);
    AddPoint(writer, "Iota Club and Cafe", 38.8890000, -77.0923000);
    AddPoint(writer, "Hilton Washington Embassy Row", 38.9103000, -77.0451000);
    AddPoint(writer, "HorseFeathers, Bar & Grill", 39.01220000000001, -77.3942);
    AddPoint(writer, "Marshall Island Airfield", 7.06, 171.2);
    AddPoint(writer, "Midway Island", 25.7, -171.7);
    AddPoint(writer, "North Pole Way", 55.0, 4.0);
    writer.Commit();
    writer.Close();
}
Developer: kstenson, Project: NHibernate.Search, Lines: 26, Source: TestCartesian.cs
Example 12: CreateIndex
public void CreateIndex(Analyzer analayer)
{
    FSDirectory fsDir = new SimpleFSDirectory(new DirectoryInfo(_indexerFolder));
    IndexWriter indexWriter = new IndexWriter(fsDir, analayer, true, Lucene.Net.Index.IndexWriter.MaxFieldLength.UNLIMITED);
    string[] files = System.IO.Directory.GetFiles(_textFilesFolder, Config.FileSearchPattern, SearchOption.AllDirectories);
    foreach (string file in files)
    {
        string name = new FileInfo(file).Name;
        string content = File.ReadAllText(file);
        Document doc = new Document();
        doc.Add(new Field(Config.Field_Path, file, Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.Add(new Field(Config.Field_Name, name, Field.Store.YES, Field.Index.ANALYZED));
        doc.Add(new Field(Config.Field_Content, content, Field.Store.NO, Field.Index.ANALYZED));
        indexWriter.AddDocument(doc);
        Console.WriteLine("{0} - {1}", file, name);
    }
    indexWriter.Optimize();
    indexWriter.Dispose();
    Console.WriteLine("File count: {0}", files.Length);
}
Developer: NDChen, Project: MyDemoCode, Lines: 26, Source: IndexHelper.cs
Example 13: TestSimpleSkip
public virtual void TestSimpleSkip()
{
    Directory dir = new CountingRAMDirectory(this, new RAMDirectory());
    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer()).SetCodec(TestUtil.AlwaysPostingsFormat(new Lucene41PostingsFormat())).SetMergePolicy(NewLogMergePolicy()));
    Term term = new Term("test", "a");
    for (int i = 0; i < 5000; i++)
    {
        Document d1 = new Document();
        d1.Add(NewTextField(term.Field(), term.Text(), Field.Store.NO));
        writer.AddDocument(d1);
    }
    writer.Commit();
    writer.ForceMerge(1);
    writer.Dispose();
    AtomicReader reader = GetOnlySegmentReader(DirectoryReader.Open(dir));
    for (int i = 0; i < 2; i++)
    {
        Counter = 0;
        DocsAndPositionsEnum tp = reader.TermPositionsEnum(term);
        CheckSkipTo(tp, 14, 185); // no skips
        CheckSkipTo(tp, 17, 190); // one skip on level 0
        CheckSkipTo(tp, 287, 200); // one skip on level 1, two on level 0
        // this test would fail if we had only one skip level,
        // because then more bytes would be read from the freqStream
        CheckSkipTo(tp, 4800, 250); // one skip on level 2
    }
}
Developer: paulirwin, Project: lucene.net, Lines: 30, Source: TestMultiLevelSkipList.cs
Example 14: MrsJones
public void MrsJones()
{
    using (var dir = new RAMDirectory())
    using (var analyzer = new LowerCaseKeywordAnalyzer())
    {
        using (var writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED))
        {
            var document = new Lucene.Net.Documents.Document();
            document.Add(new Field("Name", "MRS. SHABA", Field.Store.NO, Field.Index.ANALYZED_NO_NORMS));
            writer.AddDocument(document);
        }
        var searcher = new IndexSearcher(dir, true);
        var termEnum = searcher.IndexReader.Terms();
        while (termEnum.Next())
        {
            var buffer = termEnum.Term.Text;
            Console.WriteLine(buffer);
        }
        var queryParser = new RangeQueryParser(Version.LUCENE_29, "", analyzer);
        var query = queryParser.Parse("Name:\"MRS. S*\"");
        Console.WriteLine(query);
        var result = searcher.Search(query, 10);
        Assert.NotEqual(0, result.TotalHits);
    }
}
Developer: j2jensen, Project: ravendb, Lines: 29, Source: LuceneIndexing.cs
Example 15: IndexAndCrashOnCreateOutputSegments2
/// <summary>
/// index 1 document and commit.
/// prepare for crashing.
/// index 1 more document, and upon commit, creation of segments_2 will crash.
/// </summary>
private void IndexAndCrashOnCreateOutputSegments2()
{
    Directory realDirectory = FSDirectory.Open(Path);
    CrashAfterCreateOutput crashAfterCreateOutput = new CrashAfterCreateOutput(realDirectory);
    // NOTE: cannot use RandomIndexWriter because it
    // sometimes commits:
    IndexWriter indexWriter = new IndexWriter(crashAfterCreateOutput, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    indexWriter.AddDocument(Document);
    // writes segments_1:
    indexWriter.Commit();
    crashAfterCreateOutput.GetCrashAfterCreateOutput = "segments_2";
    indexWriter.AddDocument(Document);
    try
    {
        // tries to write segments_2 but hits fake exc:
        indexWriter.Commit();
        Assert.Fail("should have hit CrashingException");
    }
    catch (CrashingException e)
    {
        // expected
    }
    // writes segments_3
    indexWriter.Dispose();
    Assert.IsFalse(SlowFileExists(realDirectory, "segments_2"));
    crashAfterCreateOutput.Dispose();
}
Developer: ChristopherHaws, Project: lucenenet, Lines: 35, Source: TestCrashCausesCorruptIndex.cs
Example 16: AddDocumentToIndex
/// <summary>
/// Adds a single product to the index as a document with Name, Origin and Price fields.
/// </summary>
/// <param name="p">The product to index.</param>
/// <param name="writer">The index writer that receives the document.</param>
private static void AddDocumentToIndex(Product p, IndexWriter writer)
{
    Document doc = new Document();
    doc.Add(new Field("Name",
                      p.Name,
                      Field.Store.YES,
                      Field.Index.ANALYZED,
                      Lucene.Net.Documents.Field.TermVector.YES));
    doc.Add(new Field("Origin",
                      p.Origin.ToString(),
                      Field.Store.YES,
                      Field.Index.ANALYZED,
                      Lucene.Net.Documents.Field.TermVector.YES));
    doc.Add(new Field("Price",
                      p.Price.ToString(),
                      Field.Store.YES,
                      Field.Index.ANALYZED,
                      Lucene.Net.Documents.Field.TermVector.YES));
    writer.AddDocument(doc);
}
Developer: avinashbhujan, Project: SalesTaxObserverPatternWithLucene, Lines: 31, Source: ProductIndexer.cs
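A hypothetical caller for this helper is sketched below, assuming it runs inside ProductIndexer (the method is private). The example does not show the Product type or how the writer is configured, so the Product shape, directory location and analyzer here are assumptions made purely for illustration.

// Assumed minimal Product shape, for illustration only:
// public class Product { public string Name { get; set; } public string Origin { get; set; } public decimal Price { get; set; } }
var indexDir = FSDirectory.Open(new DirectoryInfo("product-index"));      // assumed location
var analyzer = new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30);   // assumed analyzer
using (var writer = new IndexWriter(indexDir, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED))
{
    var p = new Product { Name = "Dark chocolate", Origin = "Belgium", Price = 10.50m }; // assumed shape
    AddDocumentToIndex(p, writer);
    writer.Commit(); // persist the document before the writer is disposed
}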
Example 17: SetUp
public override void SetUp()
{
    base.SetUp();
    IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    //writer.setUseCompoundFile(true);
    //writer.infoStream = System.out;
    for (int i = 0; i < 1000; i++)
    {
        Document doc = new Document();
        Field.TermVector termVector;
        int mod3 = i % 3;
        int mod2 = i % 2;
        if (mod2 == 0 && mod3 == 0)
        {
            termVector = Field.TermVector.WITH_POSITIONS_OFFSETS;
        }
        else if (mod2 == 0)
        {
            termVector = Field.TermVector.WITH_POSITIONS;
        }
        else if (mod3 == 0)
        {
            termVector = Field.TermVector.WITH_OFFSETS;
        }
        else
        {
            termVector = Field.TermVector.YES;
        }
        doc.Add(new Field("field", English.IntToEnglish(i), Field.Store.YES, Field.Index.ANALYZED, termVector));
        writer.AddDocument(doc);
    }
    writer.Close();
    searcher = new IndexSearcher(directory, true);
}
Developer: Nangal, Project: lucene.net, Lines: 34, Source: TestTermVectors.cs
Example 18: IndexIndicator
private static void IndexIndicator(IndicatorMetadata indicatorMetadata,
    IEnumerable<IndicatorMetadataTextProperty> properties, IndexWriter writer)
{
    Document doc = new Document();
    doc.Add(new Field("id", indicatorMetadata.IndicatorId.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
    var text = indicatorMetadata.Descriptive;
    StringBuilder sb = new StringBuilder();
    foreach (var indicatorMetadataTextProperty in properties)
    {
        var key = indicatorMetadataTextProperty.ColumnName;
        if (text.ContainsKey(key))
        {
            sb.Append(text[key]);
            sb.Append(" ");
        }
    }
    doc.Add(new Field("IndicatorText",
        sb.ToString().ToLower(), Field.Store.NO,
        Field.Index.ANALYZED));
    writer.AddDocument(doc);
}
Developer: PublicHealthEngland, Project: fingertips-open, Lines: 26, Source: IndicatorSearchIndexBuilder.cs
Example 19: TestTermEnum
public virtual void TestTermEnum()
{
    IndexWriter writer = null;
    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    // add 100 documents with term : aaa
    // add 100 documents with terms: aaa bbb
    // Therefore, term 'aaa' has document frequency of 200 and term 'bbb' 100
    for (int i = 0; i < 100; i++)
    {
        AddDoc(writer, "aaa");
        AddDoc(writer, "aaa bbb");
    }
    writer.Close();
    // verify document frequency of terms in an unoptimized index
    VerifyDocFreq();
    // merge segments by optimizing the index
    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
    writer.Optimize();
    writer.Close();
    // verify document frequency of terms in an optimized index
    VerifyDocFreq();
}
Developer: vikasraz, Project: indexsearchutils, Lines: 28, Source: TestSegmentTermEnum.cs
Example 20: AddTextToIndex
private static void AddTextToIndex(int txts, string text, IndexWriter writer)
{
    Document doc = new Document();
    doc.Add(new Field("id", txts.ToString(), Field.Store.YES, Field.Index.UN_TOKENIZED));
    doc.Add(new Field("postBody", text, Field.Store.YES, Field.Index.TOKENIZED));
    writer.AddDocument(doc);
}
Developer: aragorn55, Project: codeclimber, Lines: 7, Source: Program.cs
Note: The Lucene.Net.Index.IndexWriter class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs, and the snippets are selected from open-source projects contributed by their authors. Copyright of the source code remains with the original authors; distribution and use should follow each project's license. Do not reproduce without permission.