This article collects typical usage examples of the C# class Lucene.Net.Index.MergePolicy. If you have been wondering what the MergePolicy class is for, how to use it, or what real-world code that uses it looks like, the curated class examples below should help.
The MergePolicy class belongs to the Lucene.Net.Index namespace. Twenty code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code samples.
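Before the individual examples, here is a minimal sketch of where MergePolicy normally enters application code: the policy is assigned to the writer configuration, and IndexWriter consults it whenever it has to decide which segments to merge. The sketch assumes a Lucene.NET 4.8-style API (LuceneVersion.LUCENE_48, an IndexWriterConfig with settable MergePolicy/MergeScheduler properties, FSDirectory.Open(string)); the snapshots quoted in the examples come from several different ports and may spell these members differently.

using Lucene.Net.Analysis.Standard;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Lucene.Net.Util;

// Sketch only: wire a TieredMergePolicy into an IndexWriter (4.8-style API assumed).
var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer);
config.MergePolicy = new TieredMergePolicy();            // the policy the writer will consult

using (var dir = FSDirectory.Open("example-index"))      // hypothetical index path
using (var writer = new IndexWriter(dir, config))
{
    var doc = new Document();
    doc.Add(new TextField("body", "hello merge policy", Field.Store.YES));
    writer.AddDocument(doc);   // flushing may queue merges chosen by the policy
    writer.ForceMerge(1);      // explicitly merge the index down to a single segment
}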
Example 1: MergeDocIDRemapper
internal int docShift; // total # deleted docs that were compacted by this merge
public MergeDocIDRemapper(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergedDocCount)
{
this.docMaps = docMaps;
SegmentInfo firstSegment = merge.segments.Info(0);
int i = 0;
while (true)
{
SegmentInfo info = infos.Info(i);
if (info.Equals(firstSegment))
break;
minDocID += info.docCount;
i++;
}
int numDocs = 0;
for (int j = 0; j < docMaps.Length; i++, j++)
{
numDocs += infos.Info(i).docCount;
System.Diagnostics.Debug.Assert(infos.Info(i).Equals(merge.segments.Info(j)));
}
maxDocID = minDocID + numDocs;
starts = new int[docMaps.Length];
newStarts = new int[docMaps.Length];
starts[0] = minDocID;
newStarts[0] = minDocID;
for (i = 1; i < docMaps.Length; i++)
{
int lastDocCount = merge.segments.Info(i - 1).docCount;
starts[i] = starts[i - 1] + lastDocCount;
newStarts[i] = newStarts[i - 1] + lastDocCount - delCounts[i - 1];
}
docShift = numDocs - mergedDocCount;
// There are rare cases when docShift is 0. It happens
// if you try to delete a docID that's out of bounds,
// because the SegmentReader still allocates deletedDocs
// and pretends it has deletions ... so we can't make
// this assert here
// assert docShift > 0;
// Make sure it all adds up:
System.Diagnostics.Debug.Assert(docShift == maxDocID - (newStarts[docMaps.Length - 1] + merge.segments.Info(docMaps.Length - 1).docCount - delCounts[docMaps.Length - 1]));
}
Developer: modulexcite, Project: Xamarin-Lucene.Net, Lines: 47, Source: MergeDocIDRemapper.cs
Example 2: AddMergeException
internal virtual void AddMergeException(MergePolicy.OneMerge merge)
{
lock (this)
{
System.Diagnostics.Debug.Assert(merge.GetException() != null);
if (!mergeExceptions.Contains(merge) && mergeGen == merge.mergeGen)
mergeExceptions.Add(merge);
}
}
Developer: Mpdreamz, Project: lucene.net, Lines: 9, Source: IndexWriter.cs
Example 3: DoMerge
protected override void DoMerge(MergePolicy.OneMerge merge)
{
TotMergedBytes += merge.TotalBytesSize();
base.DoMerge(merge);
}
Developer: Cefa68000, Project: lucenenet, Lines: 5, Source: TestConcurrentMergeScheduler.cs
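As a usage sketch (my own illustration, not part of the quoted test; it reuses the using directives from the sketch at the top of this article plus using System for Console): a scheduler subclass like the one above is attached through the writer configuration, so every merge the writer performs passes through the override. Member names follow the 4.x-era port quoted here and should be treated as assumptions.

// Hypothetical logging scheduler modeled on the DoMerge override above.
internal class LoggingMergeScheduler : ConcurrentMergeScheduler
{
    protected override void DoMerge(MergePolicy.OneMerge merge)
    {
        Console.WriteLine("merging {0} segment(s), ~{1} bytes",
            merge.Segments.Count, merge.TotalBytesSize());
        base.DoMerge(merge);   // let the base scheduler run the actual merge
    }
}

// Attached when building the writer (assumed settable property):
// config.MergeScheduler = new LoggingMergeScheduler();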
Example 4: CheckAbort
public CheckAbort(MergePolicy.OneMerge merge, Directory dir)
{
this.merge = merge;
this.dir = dir;
}
Developer: Rationalle, Project: ravendb, Lines: 5, Source: SegmentMerger.cs
Example 5: UpgradeIndexMergePolicy
/// <summary>
/// Wrap the given <seealso cref="MergePolicy"/> and intercept forceMerge requests to
/// only upgrade segments written with previous Lucene versions.
/// </summary>
public UpgradeIndexMergePolicy(MergePolicy @base)
{
this.@base = @base;
}
Developer: Cefa68000, Project: lucenenet, Lines: 8, Source: UpgradeIndexMergePolicy.cs
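A hedged usage sketch, not taken from the quoted project: wrap whatever policy the configuration already carries so that a later ForceMerge rewrites only segments written by earlier Lucene versions and leaves current-format segments untouched (same using directives as the first sketch; 4.8-style property names assumed).

// Hypothetical: upgrade old-format segments in place.
var upgradeConfig = new IndexWriterConfig(LuceneVersion.LUCENE_48,
    new StandardAnalyzer(LuceneVersion.LUCENE_48));
upgradeConfig.MergePolicy = new UpgradeIndexMergePolicy(upgradeConfig.MergePolicy);

using (var oldDir = FSDirectory.Open("old-index"))        // hypothetical path
using (var upgrader = new IndexWriter(oldDir, upgradeConfig))
{
    upgrader.ForceMerge(1);   // only segments from previous Lucene versions get rewritten
}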
Example 6: RemapDeletes
/// <summary>Called whenever a merge has completed and the merged segments had deletions </summary>
internal void RemapDeletes(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount)
{
lock (this)
{
if (docMaps == null)
// The merged segments had no deletes so docIDs did not change and we have nothing to do
return;
MergeDocIDRemapper mapper = new MergeDocIDRemapper(infos, docMaps, delCounts, merge, mergeDocCount);
deletesInRAM.Remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
deletesFlushed.Remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
flushedDocCount -= mapper.docShift;
}
}
Developer: Inzaghi2012, Project: teamlab.v7.5, Lines: 14, Source: DocumentsWriter.cs
Example 7: MergeThread
public MergeThread(ConcurrentMergeScheduler enclosingInstance, IndexWriter writer, MergePolicy.OneMerge startMerge)
{
InitBlock(enclosingInstance);
this.writer = writer;
this.startMerge = startMerge;
}
Developer: jhuntsman, Project: FlexNet, Lines: 6, Source: ConcurrentMergeScheduler.cs
Example 8: DoMerge
/// <summary>Does the actual merge, by calling {@link IndexWriter#merge} </summary>
protected internal virtual void DoMerge(MergePolicy.OneMerge merge)
{
writer.Merge(merge);
}
Developer: jhuntsman, Project: FlexNet, Lines: 5, Source: ConcurrentMergeScheduler.cs
Example 9: Merge
/// <summary>
/// Merges the indicated segments, replacing them in the stack with a
/// single segment.
///
/// @lucene.experimental
/// </summary>
public virtual void Merge(MergePolicy.OneMerge merge)
{
bool success = false;
long t0 = DateTime.Now.Millisecond;
try
{
try
{
try
{
MergeInit(merge);
//if (merge.info != null) {
//System.out.println("MERGE: " + merge.info.info.name);
//}
if (infoStream.IsEnabled("IW"))
{
infoStream.Message("IW", "now merge\n merge=" + SegString(merge.Segments) + "\n index=" + SegString());
}
MergeMiddle(merge);
MergeSuccess(merge);
success = true;
}
catch (Exception t)
{
HandleMergeException(t, merge);
}
}
finally
{
lock (this)
{
MergeFinish(merge);
if (!success)
{
if (infoStream.IsEnabled("IW"))
{
infoStream.Message("IW", "hit exception during merge");
}
if (merge.Info_Renamed != null && !segmentInfos.Contains(merge.Info_Renamed))
{
Deleter.Refresh(merge.Info_Renamed.Info.Name);
}
}
// this merge (and, generally, any change to the
// segments) may now enable new merges, so we call
// merge policy & update pending merges.
if (success && !merge.Aborted && (merge.MaxNumSegments != -1 || (!closed && !Closing)))
{
UpdatePendingMerges(MergeTrigger.MERGE_FINISHED, merge.MaxNumSegments);
}
}
}
}
catch (System.OutOfMemoryException oom)
{
HandleOOM(oom, "merge");
}
if (merge.Info_Renamed != null && !merge.Aborted)
{
if (infoStream.IsEnabled("IW"))
{
infoStream.Message("IW", "merge time " + (DateTime.Now.Millisecond - t0) + " msec for " + merge.Info_Renamed.Info.DocCount + " docs");
}
}
}
Developer: joyanta, Project: lucene.net, Lines: 77, Source: IndexWriter.cs
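Application code does not call Merge directly; this path is reached through the writer's public entry points, roughly as sketched below. The member names follow the 4.x port quoted in this example and are assumptions, as are the writer and doc variables.

// Hypothetical triggers for the Merge(...) path shown above.
writer.AddDocument(doc);   // flushing may register new pending merges via the MergePolicy
writer.MaybeMerge();       // ask the policy whether any "natural" merges are due now
writer.ForceMerge(1);      // request a forced merge down to one segment
writer.Commit();           // make the merged segments visible to newly opened readers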
Example 10: HandleMergeException
private void HandleMergeException(Exception t, MergePolicy.OneMerge merge)
{
if (infoStream.IsEnabled("IW"))
{
infoStream.Message("IW", "handleMergeException: merge=" + SegString(merge.Segments) + " exc=" + t);
}
// Set the exception on the merge, so if
// forceMerge is waiting on us it sees the root
// cause exception:
merge.Exception = t;
AddMergeException(merge);
if ((t as MergePolicy.MergeAbortedException) != null)
{
// We can ignore this exception (it happens when
// close(false) or rollback is called), unless the
// merge involves segments from external directories,
// in which case we must throw it so, for example, the
// rollbackTransaction code in addIndexes* is
// executed.
if (merge.IsExternal)
{
throw t;
}
}
else
{
IOUtils.ReThrow(t);
}
}
Developer: joyanta, Project: lucene.net, Lines: 31, Source: IndexWriter.cs
Example 11: CommitMerge
private bool CommitMerge(MergePolicy.OneMerge merge, MergeState mergeState)
{
lock (this)
{
Debug.Assert(TestPoint("startCommitMerge"));
if (HitOOM)
{
throw new InvalidOperationException("this writer hit an OutOfMemoryError; cannot complete merge");
}
if (infoStream.IsEnabled("IW"))
{
infoStream.Message("IW", "commitMerge: " + SegString(merge.Segments) + " index=" + SegString());
}
Debug.Assert(merge.RegisterDone);
// If merge was explicitly aborted, or, if rollback() or
// rollbackTransaction() had been called since our merge
// started (which results in an unqualified
// deleter.refresh() call that will remove any index
// file that current segments does not reference), we
// abort this merge
if (merge.Aborted)
{
if (infoStream.IsEnabled("IW"))
{
infoStream.Message("IW", "commitMerge: skip: it was aborted");
}
// In case we opened and pooled a reader for this
// segment, drop it now. this ensures that we close
// the reader before trying to delete any of its
// files. this is not a very big deal, since this
// reader will never be used by any NRT reader, and
// another thread is currently running close(false)
// so it will be dropped shortly anyway, but not
// doing this makes MockDirWrapper angry in
// TestNRTThreads (LUCENE-5434):
readerPool.Drop(merge.Info_Renamed);
Deleter.DeleteNewFiles(merge.Info_Renamed.Files());
return false;
}
ReadersAndUpdates mergedUpdates = merge.Info_Renamed.Info.DocCount == 0 ? null : CommitMergedDeletesAndUpdates(merge, mergeState);
// System.out.println("[" + Thread.currentThread().getName() + "] IW.commitMerge: mergedDeletes=" + mergedDeletes);
// If the doc store we are using has been closed and
// is in now compound format (but wasn't when we
// started), then we will switch to the compound
// format as well:
Debug.Assert(!segmentInfos.Contains(merge.Info_Renamed));
bool allDeleted = merge.Segments.Count == 0 || merge.Info_Renamed.Info.DocCount == 0 || (mergedUpdates != null && mergedUpdates.PendingDeleteCount == merge.Info_Renamed.Info.DocCount);
if (infoStream.IsEnabled("IW"))
{
if (allDeleted)
{
infoStream.Message("IW", "merged segment " + merge.Info_Renamed + " is 100% deleted" + (KeepFullyDeletedSegments_Renamed ? "" : "; skipping insert"));
}
}
bool dropSegment = allDeleted && !KeepFullyDeletedSegments_Renamed;
// If we merged no segments then we better be dropping
// the new segment:
Debug.Assert(merge.Segments.Count > 0 || dropSegment);
Debug.Assert(merge.Info_Renamed.Info.DocCount != 0 || KeepFullyDeletedSegments_Renamed || dropSegment);
if (mergedUpdates != null)
{
bool success = false;
try
{
if (dropSegment)
{
mergedUpdates.DropChanges();
}
// Pass false for assertInfoLive because the merged
// segment is not yet live (only below do we commit it
// to the segmentInfos):
readerPool.Release(mergedUpdates, false);
success = true;
}
finally
{
if (!success)
{
mergedUpdates.DropChanges();
readerPool.Drop(merge.Info_Renamed);
}
}
}
// Must do this after readerPool.release, in case an
// exception is hit e.g. writing the live docs for the
// merge segment, in which case we need to abort the
//......... remainder of this method omitted .........
Developer: joyanta, Project: lucene.net, Lines: 101, Source: IndexWriter.cs
Example 12: CommitMergedDeletesAndUpdates
/// <summary>
/// Carefully merges deletes and updates for the segments we just merged. this
/// is tricky because, although merging will clear all deletes (compacts the
/// documents) and compact all the updates, new deletes and updates may have
/// been flushed to the segments since the merge was started. this method
/// "carries over" such new deletes and updates onto the newly merged segment,
/// and saves the resulting deletes and updates files (incrementing the delete
/// and DV generations for merge.info). If no deletes were flushed, no new
/// deletes file is saved.
/// </summary>
private ReadersAndUpdates CommitMergedDeletesAndUpdates(MergePolicy.OneMerge merge, MergeState mergeState)
{
lock (this)
{
Debug.Assert(TestPoint("startCommitMergeDeletes"));
IList<SegmentCommitInfo> sourceSegments = merge.Segments;
if (infoStream.IsEnabled("IW"))
{
infoStream.Message("IW", "commitMergeDeletes " + SegString(merge.Segments));
}
// Carefully merge deletes that occurred after we
// started merging:
int docUpto = 0;
long minGen = long.MaxValue;
// Lazy init (only when we find a delete to carry over):
MergedDeletesAndUpdates holder = new MergedDeletesAndUpdates();
DocValuesFieldUpdates.Container mergedDVUpdates = new DocValuesFieldUpdates.Container();
for (int i = 0; i < sourceSegments.Count; i++)
{
SegmentCommitInfo info = sourceSegments[i];
minGen = Math.Min(info.BufferedDeletesGen, minGen);
int docCount = info.Info.DocCount;
Bits prevLiveDocs = merge.Readers[i].LiveDocs;
ReadersAndUpdates rld = readerPool.Get(info, false);
// We hold a ref so it should still be in the pool:
Debug.Assert(rld != null, "seg=" + info.Info.Name);
Bits currentLiveDocs = rld.LiveDocs;
IDictionary<string, DocValuesFieldUpdates> mergingFieldUpdates = rld.MergingFieldUpdates;
string[] mergingFields;
DocValuesFieldUpdates[] dvFieldUpdates;
DocValuesFieldUpdates.Iterator[] updatesIters;
if (mergingFieldUpdates.Count == 0)
{
mergingFields = null;
updatesIters = null;
dvFieldUpdates = null;
}
else
{
mergingFields = new string[mergingFieldUpdates.Count];
dvFieldUpdates = new DocValuesFieldUpdates[mergingFieldUpdates.Count];
updatesIters = new DocValuesFieldUpdates.Iterator[mergingFieldUpdates.Count];
int idx = 0;
foreach (KeyValuePair<string, DocValuesFieldUpdates> e in mergingFieldUpdates)
{
string field = e.Key;
DocValuesFieldUpdates updates = e.Value;
mergingFields[idx] = field;
dvFieldUpdates[idx] = mergedDVUpdates.GetUpdates(field, updates.Type);
if (dvFieldUpdates[idx] == null)
{
dvFieldUpdates[idx] = mergedDVUpdates.NewUpdates(field, updates.Type, mergeState.SegmentInfo.DocCount);
}
updatesIters[idx] = updates.GetIterator();
updatesIters[idx].NextDoc(); // advance to first update doc
++idx;
}
}
// System.out.println("[" + Thread.currentThread().getName() + "] IW.commitMergedDeletes: info=" + info + ", mergingUpdates=" + mergingUpdates);
if (prevLiveDocs != null)
{
// If we had deletions on starting the merge we must
// still have deletions now:
Debug.Assert(currentLiveDocs != null);
Debug.Assert(prevLiveDocs.Length() == docCount);
Debug.Assert(currentLiveDocs.Length() == docCount);
// There were deletes on this segment when the merge
// started. The merge has collapsed away those
// deletes, but, if new deletes were flushed since
// the merge started, we must now carefully keep any
// newly flushed deletes but mapping them to the new
// docIDs.
// Since we copy-on-write, if any new deletes were
// applied after merging has started, we can just
// check if the before/after liveDocs have changed.
// If so, we must carefully merge the liveDocs one
// doc at a time:
if (currentLiveDocs != prevLiveDocs)
{
// this means this segment received new deletes
// since we started the merge, so we
// must merge them:
//......... remainder of this method omitted .........
Developer: joyanta, Project: lucene.net, Lines: 101, Source: IndexWriter.cs
Example 13: MaybeApplyMergedDVUpdates
private void MaybeApplyMergedDVUpdates(MergePolicy.OneMerge merge, MergeState mergeState, int docUpto, MergedDeletesAndUpdates holder, string[] mergingFields, DocValuesFieldUpdates[] dvFieldUpdates, DocValuesFieldUpdates.Iterator[] updatesIters, int curDoc)
{
int newDoc = -1;
for (int idx = 0; idx < mergingFields.Length; idx++)
{
DocValuesFieldUpdates.Iterator updatesIter = updatesIters[idx];
if (updatesIter.Doc() == curDoc) // document has an update
{
if (holder.MergedDeletesAndUpdates_Renamed == null)
{
holder.Init(readerPool, merge, mergeState, false);
}
if (newDoc == -1) // map once per all field updates, but only if there are any updates
{
newDoc = holder.DocMap.Map(docUpto);
}
DocValuesFieldUpdates dvUpdates = dvFieldUpdates[idx];
dvUpdates.Add(newDoc, updatesIter.Value());
updatesIter.NextDoc(); // advance to next document
}
else
{
Debug.Assert(updatesIter.Doc() > curDoc, "field=" + mergingFields[idx] + " updateDoc=" + updatesIter.Doc() + " curDoc=" + curDoc);
}
}
}
Developer: joyanta, Project: lucene.net, Lines: 26, Source: IndexWriter.cs
Example 14: Init
internal void Init(ReaderPool readerPool, MergePolicy.OneMerge merge, MergeState mergeState, bool initWritableLiveDocs)
{
if (MergedDeletesAndUpdates_Renamed == null)
{
MergedDeletesAndUpdates_Renamed = readerPool.Get(merge.Info_Renamed, true);
DocMap = merge.GetDocMap(mergeState);
Debug.Assert(DocMap.IsConsistent(merge.Info_Renamed.Info.DocCount));
}
if (initWritableLiveDocs && !InitializedWritableLiveDocs)
{
MergedDeletesAndUpdates_Renamed.InitWritableLiveDocs();
this.InitializedWritableLiveDocs = true;
}
}
Developer: joyanta, Project: lucene.net, Lines: 14, Source: IndexWriter.cs
Example 15: EnsureValidMerge
private void EnsureValidMerge(MergePolicy.OneMerge merge)
{
lock (this)
{
foreach (SegmentCommitInfo info in merge.Segments)
{
if (!segmentInfos.Contains(info))
{
throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.Info.Name + ") that is not in the current index " + SegString(), directory);
}
}
}
}
Developer: joyanta, Project: lucene.net, Lines: 13, Source: IndexWriter.cs
Example 16: AddMergeException
internal virtual void AddMergeException(MergePolicy.OneMerge merge)
{
lock (this)
{
Debug.Assert(merge.Exception != null);
if (!MergeExceptions.Contains(merge) && MergeGen == merge.MergeGen)
{
MergeExceptions.Add(merge);
}
}
}
Developer: joyanta, Project: lucene.net, Lines: 11, Source: IndexWriter.cs
Example 17: IndexWriter
internal readonly Codec Codec; // for writing new segments
/// <summary>
/// Constructs a new IndexWriter per the settings given in <code>conf</code>.
/// If you want to make "live" changes to this writer instance, use
/// <seealso cref="#getConfig()"/>.
///
/// <p>
/// <b>NOTE:</b> after this writer is created, the given configuration instance
/// cannot be passed to another writer. If you intend to do so, you should
/// <seealso cref="IndexWriterConfig#clone() clone"/> it beforehand.
/// </summary>
/// <param name="d">
/// the index directory. The index is either created or appended
/// according <code>conf.getOpenMode()</code>. </param>
/// <param name="conf">
/// the configuration settings according to which IndexWriter should
/// be initialized. </param>
/// <exception cref="IOException">
/// if the directory cannot be read/written to, or if it does not
/// exist and <code>conf.getOpenMode()</code> is
/// <code>OpenMode.APPEND</code> or if there is any other low-level
/// IO error </exception>
public IndexWriter(Directory d, IndexWriterConfig conf)
{
/*if (!InstanceFieldsInitialized)
{
InitializeInstanceFields();
InstanceFieldsInitialized = true;
}*/
readerPool = new ReaderPool(this);
conf.SetIndexWriter(this); // prevent reuse by other instances
Config_Renamed = new LiveIndexWriterConfig(conf);
directory = d;
analyzer = Config_Renamed.Analyzer;
infoStream = Config_Renamed.InfoStream;
mergePolicy = Config_Renamed.MergePolicy;
mergePolicy.IndexWriter = this;
mergeScheduler = Config_Renamed.MergeScheduler;
Codec = Config_Renamed.Codec;
BufferedUpdatesStream = new BufferedUpdatesStream(infoStream);
PoolReaders = Config_Renamed.ReaderPooling;
WriteLock = directory.MakeLock(WRITE_LOCK_NAME);
if (!WriteLock.Obtain(Config_Renamed.WriteLockTimeout)) // obtain write lock
{
throw new LockObtainFailedException("Index locked for write: " + WriteLock);
}
bool success = false;
try
{
OpenMode_e? mode = Config_Renamed.OpenMode;
bool create;
if (mode == OpenMode_e.CREATE)
{
create = true;
}
else if (mode == OpenMode_e.APPEND)
{
create = false;
}
else
{
// CREATE_OR_APPEND - create only if an index does not exist
create = !DirectoryReader.IndexExists(directory);
}
// If index is too old, reading the segments will throw
// IndexFormatTooOldException.
segmentInfos = new SegmentInfos();
bool initialIndexExists = true;
if (create)
{
// Try to read first. this is to allow create
// against an index that's currently open for
// searching. In this case we write the next
// segments_N file with no segments:
try
{
segmentInfos.Read(directory);
segmentInfos.Clear();
}
catch (IOException)
{
// Likely this means it's a fresh directory
initialIndexExists = false;
}
// Record that we have a change (zero out all
// segments) pending:
Changed();
}
else
{
segmentInfos.Read(directory);
//......... remainder of this method omitted .........
Developer: joyanta, Project: lucene.net, Lines: 101, Source: IndexWriter.cs
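A small usage sketch for the constructor above (my own illustration, same using directives as the first sketch): the writer takes the directory's write lock, so opening a second writer against the same directory fails with LockObtainFailedException until the first one is disposed.

// Hypothetical: only one IndexWriter may hold a directory's write lock at a time.
var firstConfig = new IndexWriterConfig(LuceneVersion.LUCENE_48,
    new StandardAnalyzer(LuceneVersion.LUCENE_48));
var secondConfig = new IndexWriterConfig(LuceneVersion.LUCENE_48,
    new StandardAnalyzer(LuceneVersion.LUCENE_48));   // a config must not be reused across writers

using (var dir = FSDirectory.Open("example-index"))
using (var first = new IndexWriter(dir, firstConfig))
{
    try
    {
        var second = new IndexWriter(dir, secondConfig);   // same directory, still locked
    }
    catch (LockObtainFailedException e)
    {
        Console.WriteLine("index already locked for write: " + e.Message);
    }
}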
Example 18: MergeSuccess
/// <summary>
/// Hook that's called when the specified merge is complete. </summary>
internal virtual void MergeSuccess(MergePolicy.OneMerge merge)
{
}
Developer: joyanta, Project: lucene.net, Lines: 5, Source: IndexWriter.cs
Example 19: GetMergeThread
/// <summary>Create and return a new MergeThread </summary>
protected internal virtual MergeThread GetMergeThread(IndexWriter writer, MergePolicy.OneMerge merge)
{
lock (this)
{
MergeThread thread = new MergeThread(this, writer, merge);
thread.SetThreadPriority(mergeThreadPriority);
thread.IsBackground = true;
thread.Name = "Lucene Merge Thread #" + mergeThreadCount++;
return thread;
}
}
Developer: jhuntsman, Project: FlexNet, Lines: 12, Source: ConcurrentMergeScheduler.cs
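A hedged sketch building on the hook above (names from the 3.x-era port quoted in this example; the accessibility of MergeThread and its Name property from outside the assembly is an assumption): a subclass can post-process the thread the base scheduler creates, for example to tag its name for diagnostics.

// Hypothetical: prefix merge thread names so they are easy to spot in thread dumps.
internal class NamedMergeScheduler : ConcurrentMergeScheduler
{
    protected override MergeThread GetMergeThread(IndexWriter writer, MergePolicy.OneMerge merge)
    {
        MergeThread thread = base.GetMergeThread(writer, merge);
        thread.Name = "MyApp " + thread.Name;   // keep the base numbering, add a prefix
        return thread;
    }
}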
Example 20: RegisterMerge
/// <summary>
/// Checks whether this merge involves any segments
/// already participating in a merge. If not, this merge
/// is "registered", meaning we record that its segments
/// are now participating in a merge, and true is
/// returned. Else (the merge conflicts) false is
/// returned.
/// </summary>
internal bool RegisterMerge(MergePolicy.OneMerge merge)
{
lock (this)
{
if (merge.RegisterDone)
{
return true;
}
Debug.Assert(merge.Segments.Count > 0);
if (StopMerges)
{
merge.Abort();
throw new MergePolicy.MergeAbortedException("merge is aborted: " + SegString(merge.Segments));
}
bool isExternal = false;
foreach (SegmentCommitInfo info in merge.Segments)
{
if (mergingSegments.Contains(info))
{
if (infoStream.IsEnabled("IW"))
{
infoStream.Message("IW", "reject merge " + SegString(merge.Segments) + ": segment " + SegString(info) + " is already marked for merge");
}
return false;
}
if (!segmentInfos.Contains(info))
{
if (infoStream.IsEnabled("IW"))
{
infoStream.Message("IW", "reject merge " + SegString(merge.Segments) + ": segment " + SegString(info) + " does not exist in live infos");
}
return false;
}
if (info.Info.Dir != directory)
{
isExternal = true;
}
if (SegmentsToMerge.ContainsKey(info))
{
merge.MaxNumSegments = MergeMaxNumSegments;
}
}
EnsureValidMerge(merge);
PendingMerges.AddLast(merge);
if (infoStream.IsEnabled("IW"))
{
infoStream.Message("IW", "add merge to pendingMerges: " + SegString(merge.Segments) + " [total " + PendingMerges.Count + " pending]");
}
merge.MergeGen = MergeGen;
merge.IsExternal = isExternal;
// OK it does not conflict; now record that this merge
// is running (while synchronized) to avoid race
// condition where two conflicting merges from different
// threads, start
if (infoStream.IsEnabled("IW"))
{
StringBuilder builder = new StringBuilder("registerMerge merging= [");
foreach (SegmentCommitInfo info in mergingSegments)
{
builder.Append(info.Info.Name).Append(", ");
}
builder.Append("]");
// don't call mergingSegments.toString() could lead to ConcurrentModException
// since merge updates the segments FieldInfos
if (infoStream.IsEnabled("IW"))
{
infoStream.Message("IW", builder.ToString());
}
}
foreach (SegmentCommitInfo info in merge.Segments)
{
if (infoStream.IsEnabled("IW"))
{
infoStream.Message("IW", "registerMerge info=" + SegString(info));
}
mergingSegments.Add(info);
}
Debug.Assert(merge.EstimatedMergeBytes == 0);
Debug.Assert(merge.TotalMergeBytes == 0);
foreach (SegmentCommitInfo info in merge.Segments)
{
if (info.Info.DocCount > 0)
{
int delCount = NumDeletedDocs(info);
//......... remainder of this method omitted .........
Developer: joyanta, Project: lucene.net, Lines: 101, Source: IndexWriter.cs
Note: The Lucene.Net.Index.MergePolicy class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For redistribution and use, please refer to the corresponding project's license. Do not reproduce without permission.