C# Index.Term Class Code Examples


This article collects typical usage examples of the C# Lucene.Net.Index.Term class. If you are wondering what the Term class is for, how to use it, or where to find real-world examples of it, the hand-picked code examples below should help.



The Term class belongs to the Lucene.Net.Index namespace. Twenty code examples of the Term class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
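Before looking at the project examples, here is a minimal usage sketch: a Term is an exact (field, text) pair and serves as the building block for TermQuery, PrefixQuery, FuzzyQuery, delete/update-by-term operations, and so on. This sketch is not taken from any of the projects listed below; it assumes a Lucene.Net 4.8-style API and an index that already exists at the path supplied by the caller.

    using Lucene.Net.Index;
    using Lucene.Net.Search;
    using Lucene.Net.Store;

    public static class TermBasics
    {
        // Minimal sketch (assumption: an index already exists at indexPath).
        public static int CountMatches(string indexPath, string field, string text)
        {
            using (Directory dir = FSDirectory.Open(indexPath))
            using (IndexReader reader = DirectoryReader.Open(dir))
            {
                var searcher = new IndexSearcher(reader);

                // A Term is an exact (field, text) pair; terms are not analyzed,
                // so `text` must match an indexed token exactly.
                var term = new Term(field, text);

                // Count documents whose `field` contains that exact token.
                TopDocs hits = searcher.Search(new TermQuery(term), 10);
                return hits.TotalHits;
            }
        }
    }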

Example 1: UpdatePackage

        public void UpdatePackage(Package package)
        {
            var packageRegistrationKey = package.PackageRegistrationKey;
            var updateTerm = new Term("PackageRegistrationKey", packageRegistrationKey.ToString(CultureInfo.InvariantCulture));

            if (!package.IsLatest || package.IsLatestStable)
            {
                // Someone passed us in a version which was e.g. just unlisted? Or just not the latest version which is what we want to index. Doesn't really matter. We'll find one to index.
                package = _packageRepository.GetAll()
                .Where(p => (p.IsLatest || p.IsLatestStable) && p.PackageRegistrationKey == packageRegistrationKey)
                .Include(p => p.PackageRegistration)
                .Include(p => p.PackageRegistration.Owners)
                .Include(p => p.SupportedFrameworks)
                .FirstOrDefault();
            }

            // Just update the provided package
            using (Trace.Activity(String.Format(CultureInfo.CurrentCulture, "Updating Lucene Index for: {0} {1} [PackageKey:{2}]", package.PackageRegistration.Id, package.Version, package.Key)))
            {
                EnsureIndexWriter(creatingIndex: false);
                if (package != null)
                {
                    var indexEntity = new PackageIndexEntity(package);
                    Trace.Information(String.Format(CultureInfo.CurrentCulture, "Updating Document: {0}", updateTerm.ToString()));
                    _indexWriter.UpdateDocument(updateTerm, indexEntity.ToDocument());
                }
                else
                {
                    Trace.Information(String.Format(CultureInfo.CurrentCulture, "Deleting Document: {0}", updateTerm.ToString()));
                    _indexWriter.DeleteDocuments(updateTerm);
                }
                _indexWriter.Commit();
            }
        }
Developer: anurse, Project: NuGetGallery, Lines of code: 34, Source file: LuceneIndexingService.cs


Example 2: SegmentMergeInfo

		internal SegmentMergeInfo(int b, TermEnum te, IndexReader r)
		{
			base_Renamed = b;
			reader = r;
			termEnum = te;
			term = te.Term;
		}
Developer: modulexcite, Project: Xamarin-Lucene.Net, Lines of code: 7, Source file: SegmentMergeInfo.cs


Example 3: DocFreq

 public override int DocFreq(Term t)
 {
     int total = 0; // sum freqs in segments
     for (int i = 0; i < subReaders.Length; i++)
         total += subReaders[i].DocFreq(t);
     return total;
 }
Developer: kiichi7, Project: Search-Engine, Lines of code: 7, Source file: MultiReader.cs


Example 4: AddNumericRangeQuery

 protected void AddNumericRangeQuery(BooleanQuery query, NumericRangeField range, BooleanClause.Occur occurance)
 {
     var startTerm = new Term(range.FieldName, NumberTools.LongToString(range.Start));
     var endTerm = new Term(range.FieldName, NumberTools.LongToString(range.End));
     var rangeQuery = new RangeQuery(startTerm, endTerm, true);
     query.Add(rangeQuery, occurance);
 }
Developer: katebutenko, Project: SitecoreSearchContrib, Lines of code: 7, Source file: NumericRangeSearchParam.cs


Example 5: GetQuery

        public virtual Query GetQuery(XmlElement e)
        {
            string fieldName = DOMUtils.GetAttributeWithInheritanceOrFail(e, "fieldName");
            string text = DOMUtils.GetNonBlankTextOrFail(e);

            BooleanQuery bq = new BooleanQuery(DOMUtils.GetAttribute(e, "disableCoord", false));
            bq.MinimumNumberShouldMatch = DOMUtils.GetAttribute(e, "minimumNumberShouldMatch", 0);
            TokenStream ts = null;
            try
            {
                ts = analyzer.TokenStream(fieldName, text);
                ITermToBytesRefAttribute termAtt = ts.AddAttribute<ITermToBytesRefAttribute>();
                Term term = null;
                BytesRef bytes = termAtt.BytesRef;
                ts.Reset();
                while (ts.IncrementToken())
                {
                    termAtt.FillBytesRef();
                    term = new Term(fieldName, BytesRef.DeepCopyOf(bytes));
                    bq.Add(new BooleanClause(new TermQuery(term), BooleanClause.Occur.SHOULD));
                }
                ts.End();
            }
            catch (IOException ioe)
            {
                throw new Exception("Error constructing terms from index:" + ioe);
            }
            finally
            {
                IOUtils.CloseWhileHandlingException(ts);
            }

            bq.Boost = DOMUtils.GetAttribute(e, "boost", 1.0f);
            return bq;
        }
Developer: apache, Project: lucenenet, Lines of code: 35, Source file: TermsQueryBuilder.cs


Example 6: AddNumericRangeQuery

 protected void AddNumericRangeQuery(BooleanQuery query, NumericRangeField range, BooleanClause.Occur occurance)
 {
     var startTerm = new Term(range.FieldName, SearchHelper.FormatNumber(range.Start));
     var endTerm = new Term(range.FieldName, SearchHelper.FormatNumber(range.End));
     var rangeQuery = new RangeQuery(startTerm, endTerm, true);
     query.Add(rangeQuery, occurance);
 }
Developer: mebinum, Project: AdvanceDb, Lines of code: 7, Source file: NumericRangeSearchParam.cs


Example 7: Test

        public virtual void Test()
        {
            Term allTerm = new Term(FIELD, "all");
            TermQuery termQuery = new TermQuery(allTerm);

            Weight weight = IndexSearcher.CreateNormalizedWeight(termQuery);
            Assert.IsTrue(IndexSearcher.TopReaderContext is AtomicReaderContext);
            AtomicReaderContext context = (AtomicReaderContext)IndexSearcher.TopReaderContext;
            BulkScorer ts = weight.BulkScorer(context, true, ((AtomicReader)context.Reader()).LiveDocs);
            // we have 2 documents with the term all in them, one document for all the
            // other values
            IList<TestHit> docs = new List<TestHit>();
            // must call next first

            ts.Score(new CollectorAnonymousInnerClassHelper(this, context, docs));
            Assert.IsTrue(docs.Count == 2, "docs Size: " + docs.Count + " is not: " + 2);
            TestHit doc0 = docs[0];
            TestHit doc5 = docs[1];
            // The scores should be the same
            Assert.IsTrue(doc0.Score == doc5.Score, doc0.Score + " does not equal: " + doc5.Score);
            /*
             * Score should be (based on Default Sim.: All floats are approximate tf = 1
             * numDocs = 6 docFreq(all) = 2 idf = ln(6/3) + 1 = 1.693147 idf ^ 2 =
             * 2.8667 boost = 1 lengthNorm = 1 //there is 1 term in every document coord
             * = 1 sumOfSquaredWeights = (idf * boost) ^ 2 = 1.693147 ^ 2 = 2.8667
             * queryNorm = 1 / (sumOfSquaredWeights)^0.5 = 1 /(1.693147) = 0.590
             *
             * score = 1 * 2.8667 * 1 * 1 * 0.590 = 1.69
             */
            Assert.IsTrue(doc0.Score == 1.6931472f, doc0.Score + " does not equal: " + 1.6931472f);
        }
Developer: joyanta, Project: lucene.net, Lines of code: 31, Source file: TestTermScorer.cs


Example 8: SlowFuzzyQuery

        /// <summary>
        /// Create a new <see cref="SlowFuzzyQuery"/> that will match terms with a similarity 
        /// of at least <paramref name="minimumSimilarity"/> to <paramref name="term"/>.
        /// If a <paramref name="prefixLength"/> &gt; 0 is specified, a common prefix
        /// of that length is also required.
        /// </summary>
        /// <param name="term">the term to search for</param>
        /// <param name="minimumSimilarity">
        /// a value between 0 and 1 to set the required similarity
        /// between the query term and the matching terms. For example, for a
        /// <paramref name="minimumSimilarity"/> of <c>0.5</c> a term of the same length
        /// as the query term is considered similar to the query term if the edit distance
        /// between both terms is less than <c>length(term)*0.5</c>
        /// <para/>
        /// Alternatively, if <paramref name="minimumSimilarity"/> is >= 1f, it is interpreted
        /// as a pure Levenshtein edit distance. For example, a value of <c>2f</c>
        /// will match all terms within an edit distance of <c>2</c> from the
        /// query term. Edit distances specified in this way may not be fractional.
        /// </param>
        /// <param name="prefixLength">length of common (non-fuzzy) prefix</param>
        /// <param name="maxExpansions">
        /// the maximum number of terms to match. If this number is
        /// greater than <see cref="BooleanQuery.MaxClauseCount"/> when the query is rewritten,
        /// then the maxClauseCount will be used instead.
        /// </param>
        /// <exception cref="ArgumentException">
        /// if <paramref name="minimumSimilarity"/> is &gt;= 1 or &lt; 0
        /// or if <paramref name="prefixLength"/> &lt; 0
        /// </exception>
        public SlowFuzzyQuery(Term term, float minimumSimilarity, int prefixLength,
            int maxExpansions)
            : base(term.Field)
        {
            this.term = term;

            if (minimumSimilarity >= 1.0f && minimumSimilarity != (int)minimumSimilarity)
                throw new ArgumentException("fractional edit distances are not allowed");
            if (minimumSimilarity < 0.0f)
                throw new ArgumentException("minimumSimilarity < 0");
            if (prefixLength < 0)
                throw new ArgumentException("prefixLength < 0");
            if (maxExpansions < 0)
                throw new ArgumentException("maxExpansions < 0");

            SetRewriteMethod(new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(maxExpansions));

            string text = term.Text();
            int len = text.CodePointCount(0, text.Length);
            if (len > 0 && (minimumSimilarity >= 1f || len > 1.0f / (1.0f - minimumSimilarity)))
            {
                this.termLongEnough = true;
            }

            this.minimumSimilarity = minimumSimilarity;
            this.prefixLength = prefixLength;
        }
Developer: apache, Project: lucenenet, Lines of code: 56, Source file: SlowFuzzyQuery.cs
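The XML doc comment in Example 8 describes how minimumSimilarity is interpreted: a value between 0 and 1 is a similarity ratio, while a value >= 1 is treated as a pure Levenshtein edit distance. As a hedged illustration of how that constructor might be called (not code from the lucenenet project itself), the sketch below assumes the sandbox SlowFuzzyQuery type is available via the Lucene.Net.Sandbox package and that the caller already has an open IndexSearcher; the field name is illustrative only.

    using Lucene.Net.Index;
    using Lucene.Net.Search;
    using Lucene.Net.Sandbox.Queries; // assumed namespace of SlowFuzzyQuery

    public static class SlowFuzzyQueryExample
    {
        // Sketch: fuzzy-match a "title" field using the constructor shown in Example 8.
        public static TopDocs FuzzyTitleSearch(IndexSearcher searcher, string text)
        {
            // Require at least 50% similarity, with the first two characters
            // matching exactly, and expand to at most 50 candidate terms.
            var term = new Term("title", text);
            var query = new SlowFuzzyQuery(term, minimumSimilarity: 0.5f, prefixLength: 2, maxExpansions: 50);
            return searcher.Search(query, 20);
        }
    }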


Example 9: CheckSplitting

        private void CheckSplitting(Directory dir, Term splitTerm, int leftCount, int rightCount)
        {
            using (Directory dir1 = NewDirectory())
            {
                using (Directory dir2 = NewDirectory())
                {
                    PKIndexSplitter splitter = new PKIndexSplitter(dir, dir1, dir2, splitTerm,
                        NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())),
                        NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
                    splitter.Split();

                    using (IndexReader ir1 = DirectoryReader.Open(dir1))
                    {
                        using (IndexReader ir2 = DirectoryReader.Open(dir2))
                        {
                            assertEquals(leftCount, ir1.NumDocs);
                            assertEquals(rightCount, ir2.NumDocs);


                            CheckContents(ir1, "1");
                            CheckContents(ir2, "2");

                        }
                    }
                }
            }
        }
Developer: ChristopherHaws, Project: lucenenet, Lines of code: 27, Source file: TestPKIndexSplitter.cs


Example 10: ProcessTermClause

        static void ProcessTermClause(BooleanQuery bq, TermClause term)
        {
            Term t = new Term(term.Field, term.Value.ToLower());

            Lucene.Net.Search.Query q = null;

            if (term.ValueType == TermClauseType.Wildcard && term.Value.IndexOf('*') == term.Value.Length - 1)
            {
                q = new PrefixQuery(new Term(term.Field, term.Value.Substring(0, term.Value.Length - 1)));
            }
            else if (term.ValueType == TermClauseType.Wildcard)
            {
                q = new WildcardQuery(t);
            }
            else if (term.ValueType == TermClauseType.Fuzzy)
            {
                q = new FuzzyQuery(t);
            }
            else
            {
                q = new TermQuery(t);
            }

            if (term.Boost > 0)
            {
                q.SetBoost(term.Boost);
            }

            bq.Add(
                q,
                Translate(term.Type)
                );
        }
Developer: mrkurt, Project: mubble-old, Lines of code: 33, Source file: LuceneQueryTranslator.cs


Example 11: PhrasePositions

        internal readonly Term[] Terms; // for repetitions initialization

        internal PhrasePositions(DocsAndPositionsEnum postings, int o, int ord, Term[] terms)
        {
            this.Postings = postings;
            Offset = o;
            this.Ord = ord;
            this.Terms = terms;
        }
Developer: Cefa68000, Project: lucenenet, Lines of code: 9, Source file: PhrasePositions.cs


Example 12: TestAnyChanges

 public virtual void TestAnyChanges()
 {
     DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue();
     int size = 200 + Random().Next(500) * RANDOM_MULTIPLIER;
     int termsSinceFreeze = 0;
     int queriesSinceFreeze = 0;
     for (int i = 0; i < size; i++)
     {
         Term term = new Term("id", "" + i);
         if (Random().Next(10) == 0)
         {
             queue.AddDelete(new TermQuery(term));
             queriesSinceFreeze++;
         }
         else
         {
             queue.AddDelete(term);
             termsSinceFreeze++;
         }
         Assert.IsTrue(queue.AnyChanges());
         if (Random().Next(5) == 0)
         {
             FrozenBufferedUpdates freezeGlobalBuffer = queue.FreezeGlobalBuffer(null);
             Assert.AreEqual(termsSinceFreeze, freezeGlobalBuffer.TermCount);
             Assert.AreEqual(queriesSinceFreeze, ((Query[])freezeGlobalBuffer.Queries_Nunit()).Length);
             queriesSinceFreeze = 0;
             termsSinceFreeze = 0;
             Assert.IsFalse(queue.AnyChanges());
         }
     }
 }
Developer: WakeflyCBass, Project: lucenenet, Lines of code: 31, Source file: TestDocumentsWriterDeleteQueue.cs


Example 13: Search

		public Result Search (string term, int count, int start) {
			try {
				term = term.ToLower ();
				Term htTerm = new Term ("hottext", term);
				Query qq1 = new FuzzyQuery (htTerm);
				Query qq2 = new TermQuery (htTerm);
				qq2.Boost = 10f;
				Query qq3 = new PrefixQuery (htTerm);
				qq3.Boost = 10f;
				DisjunctionMaxQuery q1 = new DisjunctionMaxQuery (0f);
				q1.Add (qq1);
				q1.Add (qq2);
				q1.Add (qq3);
				Query q2 = new TermQuery (new Term ("text", term));
				q2.Boost = 3f;
				Query q3 = new TermQuery (new Term ("examples", term));
				q3.Boost = 3f;
				DisjunctionMaxQuery q = new DisjunctionMaxQuery (0f);

				q.Add (q1);
				q.Add (q2);
				q.Add (q3);
			
				TopDocs top = SearchInternal (q, count, start);
				Result r = new Result (term, searcher, top.ScoreDocs);
				Results.Add (r);
				return r;
			} catch (IOException) {
				Console.WriteLine ("No index in {0}", dir);
				return null;
			}
		}
Developer: runefs, Project: Marvin_mono, Lines of code: 32, Source file: SearchableIndex.cs


Example 14: DocValuesUpdate

        internal int DocIDUpto = -1; // unassigned until applied, and confusing that it's here, when it's just used in BufferedDeletes...

        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="term"> the <seealso cref="Term"/> which determines the documents that will be updated </param>
        /// <param name="field"> the <seealso cref="NumericDocValuesField"/> to update </param>
        /// <param name="value"> the updated value </param>
        protected internal DocValuesUpdate(DocValuesFieldUpdates.Type_e type, Term term, string field, object value)
        {
            this.Type = type;
            this.Term = term;
            this.Field = field;
            this.Value = value;
        }
Developer: joyanta, Project: lucene.net, Lines of code: 15, Source file: DocValuesUpdate.cs


Example 15: AndExtension

        public void AndExtension()
        {
            BooleanQuery originalQuery = new BooleanQuery();
            BooleanQuery innerQuery = new BooleanQuery();

            Term term = new Term("_name", "value1");
            TermQuery termQuery1 = new TermQuery(term);
            innerQuery.Add(termQuery1, Occur.MUST);

            Term term2 = new Term("_name", "value2");
            TermQuery termQuery2 = new TermQuery(term2);
            innerQuery.Add(termQuery2, Occur.MUST);

            originalQuery.Add(innerQuery, Occur.MUST);
            string queryString = originalQuery.ToString();

            QueryBuilder builder = new QueryBuilder();
            builder.And
                (
                    x => x.Term("_name", "value1"),
                    x => x.Term("_name", "value2")
                );
            Query replacementQuery = builder.Build();
            string newQueryString = replacementQuery.ToString();

            Assert.AreEqual(queryString, newQueryString);
            Console.Write(queryString);
        }
Developer: modulexcite, Project: Lucinq, Lines of code: 28, Source file: EquivalencyTests.cs


Example 16: TestSimpleSkip

        public virtual void TestSimpleSkip()
        {
            Directory dir = new CountingRAMDirectory(this, new RAMDirectory());
            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer()).SetCodec(TestUtil.AlwaysPostingsFormat(new Lucene41PostingsFormat())).SetMergePolicy(NewLogMergePolicy()));
            Term term = new Term("test", "a");
            for (int i = 0; i < 5000; i++)
            {
                Document d1 = new Document();
                d1.Add(NewTextField(term.Field(), term.Text(), Field.Store.NO));
                writer.AddDocument(d1);
            }
            writer.Commit();
            writer.ForceMerge(1);
            writer.Dispose();

            AtomicReader reader = GetOnlySegmentReader(DirectoryReader.Open(dir));

            for (int i = 0; i < 2; i++)
            {
                Counter = 0;
                DocsAndPositionsEnum tp = reader.TermPositionsEnum(term);
                CheckSkipTo(tp, 14, 185); // no skips
                CheckSkipTo(tp, 17, 190); // one skip on level 0
                CheckSkipTo(tp, 287, 200); // one skip on level 1, two on level 0

                // this test would fail if we had only one skip level,
                // because than more bytes would be read from the freqStream
                CheckSkipTo(tp, 4800, 250); // one skip on level 2
            }
        }
Developer: paulirwin, Project: lucene.net, Lines of code: 30, Source file: TestMultiLevelSkipList.cs


Example 17: TestRollbackIntegrityWithBufferFlush

        public void TestRollbackIntegrityWithBufferFlush()
        {
            Directory dir = new MockRAMDirectory();
            IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
            for (int i = 0; i < 5; i++)
            {
                Document doc = new Document();
                doc.Add(new Field("pk", i.ToString(), Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
                w.AddDocument(doc);
            }
            w.Close();

            // If buffer size is small enough to cause a flush, errors ensue...
            w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
            w.SetMaxBufferedDocs(2);

            Term pkTerm = new Term("pk", "");
            for (int i = 0; i < 3; i++)
            {
                Document doc = new Document();
                String value = i.ToString();
                doc.Add(new Field("pk", value, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
                doc.Add(new Field("text", "foo", Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
                w.UpdateDocument(pkTerm.CreateTerm(value), doc);
            }
            w.Rollback();

            IndexReader r = IndexReader.Open(dir, true);
            Assert.AreEqual(5, r.NumDocs(), "index should contain same number of docs post rollback");
            r.Close();
            dir.Close();
        }
Developer: Mpdreamz, Project: lucene.net, Lines of code: 32, Source file: TestRollback.cs


Example 18: FilterQueryByClasses

        public static Lucene.Net.Search.Query FilterQueryByClasses(IESI.ISet<System.Type> classesAndSubclasses, Lucene.Net.Search.Query luceneQuery)
        {
            // A query filter is more practical than a manual class filtering post query (esp on scrollable resultsets)
            // it also probably minimise the memory footprint
            if (classesAndSubclasses == null)
            {
                return luceneQuery;
            }

            BooleanQuery classFilter = new BooleanQuery();

            // annihilate the scoring impact of DocumentBuilder.CLASS_FIELDNAME
            classFilter.SetBoost(0);
            foreach (System.Type clazz in classesAndSubclasses)
            {
                Term t = new Term(DocumentBuilder.CLASS_FIELDNAME, TypeHelper.LuceneTypeName(clazz));
                TermQuery termQuery = new TermQuery(t);
                classFilter.Add(termQuery, BooleanClause.Occur.SHOULD);
            }

            BooleanQuery filteredQuery = new BooleanQuery();
            filteredQuery.Add(luceneQuery, BooleanClause.Occur.MUST);
            filteredQuery.Add(classFilter, BooleanClause.Occur.MUST);
            return filteredQuery;
        }
Developer: kstenson, Project: NHibernate.Search, Lines of code: 25, Source file: FullTextSearchHelper.cs


Example 19: ParseRange

        public static BooleanQuery ParseRange(string fieldName, long lowerValue, long upperValue, bool inclusive)
        {
            if (lowerValue > upperValue)
            {
                return null;
            }

            //var rangeQuery = new BooleanQuery();
            var dateQuery = new BooleanQuery();
            BooleanQuery.SetMaxClauseCount(int.MaxValue);

            for (long i = lowerValue; i < upperValue; i++)
            {
                var term = new Term(fieldName, i.ToString());
                var q = new TermQuery(term);
                dateQuery.Add(q, BooleanClause.Occur.SHOULD);
            }

            if (inclusive)
            {
                var term = new Term(fieldName, upperValue.ToString());
                var q = new TermQuery(term);
                dateQuery.Add(q, BooleanClause.Occur.SHOULD);
            }

            //if (dateQuery.GetClauses() != null || dateQuery.GetClauses().Length != 0)
            //{
            //    rangeQuery.Add(dateQuery, BooleanClause.Occur.MUST);
            //}

            return dateQuery;
        }
Developer: sinsay, Project: SSE, Lines of code: 32, Source file: QueryParser.cs


Example 20: TestSimpleSkip

		public virtual void  TestSimpleSkip()
		{
			RAMDirectory dir = new RAMDirectory();
			IndexWriter writer = new IndexWriter(dir, new PayloadAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
			Term term = new Term("test", "a");
			for (int i = 0; i < 5000; i++)
			{
				Document d1 = new Document();
				d1.Add(new Field(term.Field(), term.Text(), Field.Store.NO, Field.Index.ANALYZED));
				writer.AddDocument(d1);
			}
			writer.Flush();
			writer.Optimize();
			writer.Close();
			
			IndexReader reader = SegmentReader.GetOnlySegmentReader(dir);
			SegmentTermPositions tp = (SegmentTermPositions) reader.TermPositions();
			tp.freqStream_ForNUnit = new CountingStream(this, tp.freqStream_ForNUnit);
			
			for (int i = 0; i < 2; i++)
			{
				counter = 0;
				tp.Seek(term);
				
				CheckSkipTo(tp, 14, 185); // no skips
				CheckSkipTo(tp, 17, 190); // one skip on level 0
				CheckSkipTo(tp, 287, 200); // one skip on level 1, two on level 0
				
				// this test would fail if we had only one skip level,
				// because than more bytes would be read from the freqStream
				CheckSkipTo(tp, 4800, 250); // one skip on level 2
			}
		}
Developer: Rationalle, Project: ravendb, Lines of code: 33, Source file: TestMultiLevelSkipList.cs



Note: The Lucene.Net.Index.Term class examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please refer to each project's License when distributing or using the code. Do not reproduce this article without permission.

