This article collects typical usage examples of the Java class org.apache.lucene.analysis.tr.TurkishAnalyzer. If you are unsure what TurkishAnalyzer does or how to use it, the class code examples selected below may help.
The TurkishAnalyzer class belongs to the org.apache.lucene.analysis.tr package. Six code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
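Before the collected examples, here is a minimal standalone sketch of TurkishAnalyzer in use, tokenizing a short Turkish string. The sample text and the field name "body" are illustrative assumptions, not taken from any of the projects below, and the no-argument constructor assumes a Lucene 4.10+/5.x API.

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tr.TurkishAnalyzer;

public class TurkishAnalyzerDemo {
    public static void main(String[] args) throws IOException {
        // The no-argument constructor uses the bundled Turkish stopword set.
        TurkishAnalyzer analyzer = new TurkishAnalyzer();
        // The field name is arbitrary here; analyzers may vary behavior per field.
        TokenStream ts = analyzer.tokenStream("body", new StringReader("Kitapları okudum"));
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println(term.toString()); // lowercased, stopword-filtered, stemmed tokens
        }
        ts.end();
        ts.close();
        analyzer.close();
    }
}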
Example 1: testStandardTokenizerVersions
import org.apache.lucene.analysis.tr.TurkishAnalyzer; // import the required package/class
public void testStandardTokenizerVersions() throws Exception {
    assertEquals(DEFAULT_VERSION, solrConfig.luceneMatchVersion);
    final IndexSchema schema = h.getCore().getLatestSchema();

    FieldType type = schema.getFieldType("textDefault");
    TokenizerChain ana = (TokenizerChain) type.getIndexAnalyzer();
    assertEquals(DEFAULT_VERSION, (ana.getTokenizerFactory()).getLuceneMatchVersion());
    assertEquals(DEFAULT_VERSION, (ana.getTokenFilterFactories()[2]).getLuceneMatchVersion());

    type = schema.getFieldType("text40");
    ana = (TokenizerChain) type.getIndexAnalyzer();
    assertEquals(Version.LUCENE_4_0_0_ALPHA, (ana.getTokenizerFactory()).getLuceneMatchVersion());
    assertEquals(Version.LUCENE_4_3_0, (ana.getTokenFilterFactories()[2]).getLuceneMatchVersion());

    type = schema.getFieldType("textTurkishAnalyzerDefault");
    Analyzer ana1 = type.getIndexAnalyzer();
    assertTrue(ana1 instanceof TurkishAnalyzer);
    assertEquals(DEFAULT_VERSION, ana1.getVersion());

    type = schema.getFieldType("textTurkishAnalyzer40");
    ana1 = type.getIndexAnalyzer();
    assertTrue(ana1 instanceof TurkishAnalyzer);
    assertEquals(Version.LUCENE_4_0_0_ALPHA, ana1.getVersion());
}
Developer: europeana, Project: search, Lines of code: 26, Source file: TestLuceneMatchVersion.java
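Example 1 asserts that Analyzer.getVersion() on a TurkishAnalyzer reflects the configured luceneMatchVersion. Outside of Solr, the same effect comes from calling setVersion directly; a minimal sketch, reusing the Version.LUCENE_4_3_0 constant that appears in the test above (assuming a Lucene release where that constant is still present):

import org.apache.lucene.analysis.tr.TurkishAnalyzer;
import org.apache.lucene.util.Version;

public class AnalyzerVersionDemo {
    public static void main(String[] args) {
        TurkishAnalyzer analyzer = new TurkishAnalyzer();
        // An analyzer reports Version.LATEST until setVersion overrides it;
        // Solr calls setVersion based on luceneMatchVersion in solrconfig.xml.
        analyzer.setVersion(Version.LUCENE_4_3_0);
        System.out.println(analyzer.getVersion()); // prints 4.3.0
        analyzer.close();
    }
}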
Example 2: TurkishAnalyzerProvider
import org.apache.lucene.analysis.tr.TurkishAnalyzer; // import the required package/class
public TurkishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
    super(indexSettings, name, settings);
    analyzer = new TurkishAnalyzer(
        Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, TurkishAnalyzer.getDefaultStopSet()),
        Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
    );
    analyzer.setVersion(version);
}
Developer: justor, Project: elasticsearch_my, Lines of code: 9, Source file: TurkishAnalyzerProvider.java
Example 3: TurkishAnalyzerProvider
import org.apache.lucene.analysis.tr.TurkishAnalyzer; // import the required package/class
@Inject
public TurkishAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env, @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    analyzer = new TurkishAnalyzer(Analysis.parseStopWords(env, settings, TurkishAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
    analyzer.setVersion(version);
}
Developer: baidu, Project: Elasticsearch, Lines of code: 8, Source file: TurkishAnalyzerProvider.java
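Both provider examples end up in the same two-argument constructor, TurkishAnalyzer(stopwords, stemExclusionSet). Below is a minimal sketch of that constructor with hand-built sets in place of Elasticsearch's settings parsing; the word lists are illustrative assumptions, and the CharArraySet import path shown is the Lucene 5.x one (it moved to org.apache.lucene.analysis in later releases).

import java.util.Arrays;

import org.apache.lucene.analysis.tr.TurkishAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;

public class CustomTurkishAnalyzer {
    public static void main(String[] args) {
        // Extra stopwords; alternatively start from TurkishAnalyzer.getDefaultStopSet().
        CharArraySet stopwords = new CharArraySet(Arrays.asList("ve", "bir", "bu"), false);
        // Terms listed here are protected from the Turkish stemmer.
        CharArraySet stemExclusion = new CharArraySet(Arrays.asList("istanbul"), false);

        TurkishAnalyzer analyzer = new TurkishAnalyzer(stopwords, stemExclusion);
        // ... use analyzer.tokenStream(...) as in the sketch at the top, then close it.
        analyzer.close();
    }
}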
Example 4: getDefaultStopSet
import org.apache.lucene.analysis.tr.TurkishAnalyzer; // import the required package/class
/**
 * Returns the default stop word set for a language.
 *
 * @param language language code
 * @return stop word set
 */
public static CharArraySet getDefaultStopSet(String language) {
    try {
        if ("en".equalsIgnoreCase(language)) {
            return StandardAnalyzer.STOP_WORDS_SET;
        } else if ("es".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "spanish_stop.txt", StandardCharsets.UTF_8));
        } else if ("fr".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "french_stop.txt", StandardCharsets.UTF_8));
        } else if ("de".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "german_stop.txt", StandardCharsets.UTF_8));
        } else if ("pl".equalsIgnoreCase(language)) {
            return WordlistLoader.getWordSet(IOUtils.getDecodingReader(PolishAnalyzer.class, "stopwords.txt", StandardCharsets.UTF_8), "#");
        } else if ("pt".equalsIgnoreCase(language) || "br".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "portuguese_stop.txt", StandardCharsets.UTF_8));
        } else if ("it".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "italian_stop.txt", StandardCharsets.UTF_8));
        } else if ("cz".equalsIgnoreCase(language) || "sk".equalsIgnoreCase(language)) {
            return WordlistLoader.getWordSet(IOUtils.getDecodingReader(CzechAnalyzer.class, "stopwords.txt", StandardCharsets.UTF_8), "#");
        } else if ("tr".equalsIgnoreCase(language)) {
            return TurkishAnalyzer.loadStopwordSet(false, TurkishAnalyzer.class, "stopwords.txt", "#");
        } else if ("ru".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "russian_stop.txt", StandardCharsets.UTF_8));
        } else if ("ro".equalsIgnoreCase(language)) {
            return RomanianAnalyzer.loadStopwordSet(false, RomanianAnalyzer.class, "stopwords.txt", "#");
        } else if ("bg".equalsIgnoreCase(language)) {
            return BulgarianAnalyzer.loadStopwordSet(false, BulgarianAnalyzer.class, "stopwords.txt", "#");
        } else if ("nl".equalsIgnoreCase(language)) {
            return WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class, "dutch_stop.txt", StandardCharsets.UTF_8));
        }
    } catch (Exception ignored) {
        throw new RuntimeException("Unable to load default stopword set");
    }
    return StandardAnalyzer.STOP_WORDS_SET;
}
Developer: fiohol, Project: theSemProject, Lines of code: 42, Source file: MyAnalyzer.java
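A short usage sketch for the helper above: fetch the Turkish stop set and hand it to a TurkishAnalyzer in place of the bundled defaults. The static call on MyAnalyzer matches the source file named in the attribution, but the surrounding snippet is hypothetical.

// Hypothetical caller; getDefaultStopSet is the static method shown above.
CharArraySet turkishStops = MyAnalyzer.getDefaultStopSet("tr");
System.out.println("Loaded " + turkishStops.size() + " Turkish stopwords");

// The set can be passed straight to the analyzer, with no stem exclusions.
TurkishAnalyzer analyzer = new TurkishAnalyzer(turkishStops, CharArraySet.EMPTY_SET);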
Example 5: get
import org.apache.lucene.analysis.tr.TurkishAnalyzer; // import the required package/class
@Override
public TurkishAnalyzer get() {
    return this.analyzer;
}
Developer: justor, Project: elasticsearch_my, Lines of code: 5, Source file: TurkishAnalyzerProvider.java
Example 6: Tokenizer
import org.apache.lucene.analysis.tr.TurkishAnalyzer; // import the required package/class
public Tokenizer() {
    Analyzer en_analyzer = new EnglishAnalyzer(Version.LUCENE_44);
    Analyzer es_analyzer = new SpanishAnalyzer(Version.LUCENE_44);
    Analyzer de_analyzer = new GermanAnalyzer(Version.LUCENE_44);
    Analyzer da_analyzer = new DanishAnalyzer(Version.LUCENE_44);
    Analyzer el_analyzer = new GreekAnalyzer(Version.LUCENE_44);
    Analyzer fr_analyzer = new FrenchAnalyzer(Version.LUCENE_44);
    Analyzer it_analyzer = new ItalianAnalyzer(Version.LUCENE_44);
    Analyzer pt_analyzer = new PortugueseAnalyzer(Version.LUCENE_44);
    Analyzer ru_analyzer = new RussianAnalyzer(Version.LUCENE_44);
    Analyzer fa_analyzer = new PersianAnalyzer(Version.LUCENE_44);
    Analyzer ar_analyzer = new ArabicAnalyzer(Version.LUCENE_44);
    Analyzer id_analyzer = new IndonesianAnalyzer(Version.LUCENE_44);
    Analyzer pl_analyzer = new MorfologikAnalyzer(Version.LUCENE_44);
    Analyzer nl_analyzer = new DutchAnalyzer(Version.LUCENE_44);
    Analyzer no_analyzer = new NorwegianAnalyzer(Version.LUCENE_44);
    Analyzer ro_analyzer = new RomanianAnalyzer(Version.LUCENE_44);
    Analyzer sv_analyzer = new SwedishAnalyzer(Version.LUCENE_44);
    Analyzer fi_analyzer = new FinnishAnalyzer(Version.LUCENE_44);
    Analyzer tr_analyzer = new TurkishAnalyzer(Version.LUCENE_44);
    Analyzer hu_analyzer = new HungarianAnalyzer(Version.LUCENE_44);
    Analyzer bg_analyzer = new BulgarianAnalyzer(Version.LUCENE_44);

    analyzers.put("en", en_analyzer);
    analyzers.put("es", es_analyzer);
    analyzers.put("de", de_analyzer);
    analyzers.put("da", da_analyzer);
    analyzers.put("el", el_analyzer);
    analyzers.put("fr", fr_analyzer);
    analyzers.put("it", it_analyzer);
    analyzers.put("pt", pt_analyzer);
    analyzers.put("ru", ru_analyzer);
    analyzers.put("fa", fa_analyzer);
    analyzers.put("ar", ar_analyzer);
    analyzers.put("id", id_analyzer);
    analyzers.put("pl", pl_analyzer);
    analyzers.put("nl", nl_analyzer);
    analyzers.put("no", no_analyzer);
    analyzers.put("ro", ro_analyzer);
    analyzers.put("sv", sv_analyzer);
    analyzers.put("fi", fi_analyzer);
    analyzers.put("tr", tr_analyzer);
    analyzers.put("hu", hu_analyzer);
    analyzers.put("bg", bg_analyzer);
}
Developer: socialsensor, Project: social-event-detection, Lines of code: 47, Source file: Tokenizer.java
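A hedged sketch of how such a per-language analyzer map might be consumed: look up the analyzer for a language code and collect the tokens it produces. The tokenize helper below is illustrative and not part of the original Tokenizer class; it relies on Analyzer.tokenStream(String, String), which is available in the Lucene 4.4 line used above.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class TokenizeHelper {
    // Runs the text through the given analyzer and returns the emitted terms.
    public static List<String> tokenize(Analyzer analyzer, String text) throws IOException {
        List<String> tokens = new ArrayList<String>();
        TokenStream ts = analyzer.tokenStream("text", text);
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            tokens.add(term.toString());
        }
        ts.end();
        ts.close();
        return tokens;
    }
}

With the map built in Example 6, tokenizing Turkish text would look like tokenize(analyzers.get("tr"), "Kitapları okudum").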
Note: The org.apache.lucene.analysis.tr.TurkishAnalyzer examples in this article were collected from GitHub, MSDocs, and similar source-code and documentation platforms. The code snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are governed by each project's License. Do not republish without permission.