// NOTE(review): this excerpt is truncated by the blog formatting — the try block
// opened below never closes within this fragment; the matching success/close
// handling is not visible here.
@Override
public void build(InputIterator iter) throws IOException {
// Release any previously opened searcher manager and writer before rebuilding;
// leaving an old IndexWriter open would hold the directory's write.lock
// (the LockObtainFailedException discussed later in this post).
if (searcherMgr != null) {
searcherMgr.close();
searcherMgr = null;
}
if (writer != null) {
writer.close();
writer = null;
}
boolean success = false;
try {
// First pass: build a temporary normal Lucene index,
// just indexing the suggestions as they iterate:
writer = new IndexWriter(dir,
getIndexWriterConfig(getGramAnalyzer(), IndexWriterConfig.OpenMode.CREATE));
//long t0 = System.nanoTime();
// TODO: use threads?
BytesRef text;
// Drain the iterator: one document per suggestion, carrying the optional
// payload, contexts and weight supplied by the InputIterator.
while ((text = iter.next()) != null) {
BytesRef payload;
if (iter.hasPayloads()) {
payload = iter.payload();
} else {
payload = null;
}
add(text, iter.contexts(), iter.weight(), payload);
}
/**
 * Adds a single suggestion entry to the suggester's underlying index.
 *
 * @param text     the suggestion text (indexed and stored as doc values)
 * @param contexts optional filtering contexts; may be null
 * @param weight   ranking weight for this suggestion
 * @param payload  optional opaque payload returned with lookup results; may be null
 * @throws IOException if the underlying IndexWriter fails
 */
public void add(BytesRef text, Set<BytesRef> contexts, long weight, BytesRef payload) throws IOException {
ensureOpen();
writer.addDocument(buildDocument(text, contexts, weight, payload));
}
?private Document buildDocument(BytesRef text, Set<BytesRef> contexts, long weight, BytesRef payload) throws IOException {
String textString = text.utf8ToString();
Document doc = new Document();
FieldType ft = getTextFieldType();
doc.add(new Field(TEXT_FIELD_NAME, textString, ft));
doc.add(new Field("textgrams", textString, ft));
doc.add(new StringField(EXACT_TEXT_FIELD_NAME, textString, Field.Store.NO));
doc.add(new BinaryDocValuesField(TEXT_FIELD_NAME, text));
doc.add(new NumericDocValuesField("weight", weight));
if (payload != null) {
doc.add(new BinaryDocValuesField("payloads", payload));
}
if (contexts != null) {
for(BytesRef context : contexts) {
doc.add(new StringField(CONTEXTS_FIELD_NAME, context, Field.Store.NO));
doc.add(new SortedSetDocValuesField(CONTEXTS_FIELD_NAME, context));
}
}
return doc;
}
?private static final Sort SORT = new Sort(new SortField("weight", SortField.Type.LONG, true));
// Decide how query terms combine: MUST (AND semantics) when every token must
// match, SHOULD (OR semantics) when any token is enough.
if (allTermsRequired) {
occur = BooleanClause.Occur.MUST;
} else {
occur = BooleanClause.Occur.SHOULD;
}
// Tokenize the user's key with the query analyzer. All tokens except the last
// become exact TermQuery clauses (collected in matchedTokens); the last token
// is held back in lastToken because it may be a partially-typed prefix.
// NOTE(review): truncated excerpt — the code consuming lastToken and closing
// this try-with-resources block lies outside this fragment.
try (TokenStream ts = queryAnalyzer.tokenStream("", new StringReader(key.toString()))) {
//long t0 = System.currentTimeMillis();
ts.reset();
final CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
final OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
String lastToken = null;
query = new BooleanQuery.Builder();
int maxEndOffset = -1;
matchedTokens = new HashSet<>();
while (ts.incrementToken()) {
if (lastToken != null) {
matchedTokens.add(lastToken);
query.add(new TermQuery(new Term(TEXT_FIELD_NAME, lastToken)), occur);
}
lastToken = termAtt.toString();
if (lastToken != null) {
// Track the furthest end offset so the caller can tell whether the last
// token reaches the end of the raw input (i.e. is still being typed).
maxEndOffset = Math.max(maxEndOffset, offsetAtt.endOffset());
}
}
// Example: context-filtered lookup — only suggestions indexed with this region
// context are returned; 2 results, highlighted (onlyMorePopular=true per the
// 4-arg overload's third flag), no all-terms requirement.
// NOTE(review): "UTF8" is a legal charset alias in Java, but the canonical
// StandardCharsets.UTF_8 overload avoids the checked UnsupportedEncodingException.
Set<BytesRef> contexts = new HashSet<>();
contexts.add(new BytesRef(region.getBytes("UTF8")));
List<Lookup.LookupResult> results = suggester.lookup(name, contexts, 2, true, false);
<searchComponent name="suggest" class="solr.SuggestComponent">
<lst name="suggester">
<str name="name">default</str>
<str name="lookupImpl">FuzzyLookupFactory</str>
<str name="dictionaryImpl">DocumentDictionaryFactory</str>
<str name="field">suggest</str>
<str name="weightField"></str>
<str name="suggestAnalyzerFieldType">string</str>
<str name="buildOnStartup">false</str>
</lst>
</searchComponent>
<requestHandler name="/suggest" class="org.apache.solr.handler.component.SearchHandler"
startup="lazy" >
<lst name="defaults">
<str name="suggest">true</str>
<str name="suggest.count">10</str>
</lst>
<arr name="components">
<str>suggest</str>
</arr>
</requestHandler>
// (Repeated from build() above.) First pass: open a fresh index in CREATE mode
// and add one document per suggestion from the InputIterator, with its
// optional payload, contexts and weight.
writer = new IndexWriter(dir,
getIndexWriterConfig(getGramAnalyzer(), IndexWriterConfig.OpenMode.CREATE));
BytesRef text;
while ((text = iter.next()) != null) {
BytesRef payload;
if (iter.hasPayloads()) {
payload = iter.payload();
} else {
payload = null;
}
add(text, iter.contexts(), iter.weight(), payload);
}
<fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
<!-- in this example, we will only use synonyms at query time
<filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
-->
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
 <entity name="gt_brand" query="
select brand_id, brand_name, brand_pinyin, brand_name_second, sort from gt_goods_brand
" >
<field column="brand_id" name="id"/>
<field column="brand_name" name="brand_name"/>
<field column="brand_pinyin" name="brand_pinyin"/>
<field column="brand_name_second" name="brand_name_second"/>
<field column="sort" name="sort"/>
</entity>
// Example: build an AnalyzingInfixSuggester directly from an existing Solr core's
// index via DocumentDictionary (text field "brand_pinyin", weight field "sort",
// payload field "brand_name") and run a pinyin lookup.
// NOTE(review): suggester and directoryReader are never closed here; the open
// suggester holds write.lock on its internal index directory — this is exactly
// the LockObtainFailedException scenario described below. Close both (or use
// try-with-resources) in real code.
Directory indexDir = FSDirectory.open(Paths.get("/Users/xxx/develop/tools/solr-5.5.0/server/solr/suggest/data/index"));
StandardAnalyzer analyzer = new StandardAnalyzer();
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(indexDir, analyzer);
DirectoryReader directoryReader = DirectoryReader.open(indexDir);
DocumentDictionary documentDictionary = new DocumentDictionary(directoryReader, "brand_pinyin", "sort", "brand_name");
suggester.build(documentDictionary.getEntryIterator());
List<Lookup.LookupResult> cha = suggester.lookup("nijiazhubao", 5, false, false);
for (Lookup.LookupResult lookupResult : cha) {
// System.out.println(lookupResult.key);
// System.out.println(lookupResult.value);
System.out.println(new String(lookupResult.payload.bytes, "UTF8"));
}
<str name="field">brand_pinyin</str>
<str name="weightField">sort</str>
<str name="payloadField">brand_name</str>
<str name="suggestAnalyzerFieldType">string</str>
<str name="buildOnStartup">true</str>
<searchComponent name="suggest" class="solr.SuggestComponent">
<lst name="suggester">
<str name="name">default</str>
<str name="lookupImpl">FuzzyLookupFactory</str> <!-- org.apache.solr.spelling.suggest.fst -->
<str name="dictionaryImpl">DocumentDictionaryFactory</str> <!-- org.apache.solr.spelling.suggest.HighFrequencyDictionaryFactory -->
<str name="field">category_name</str>
<str name="weightField"></str>
<str name="suggestAnalyzerFieldType">string</str>
</lst>
</searchComponent>
<searchComponent name="suggest1" class="solr.SuggestComponent">
<lst name="suggester">
<str name="name">default</str>
<str name="lookupImpl">FuzzyLookupFactory</str> <!-- org.apache.solr.spelling.suggest.fst -->
<str name="dictionaryImpl">DocumentDictionaryFactory</str> <!-- org.apache.solr.spelling.suggest.HighFrequencyDictionaryFactory -->
<str name="field">brand_name</str>
<str name="weightField"></str>
<str name="suggestAnalyzerFieldType">string</str>
</lst>
</searchComponent>
<requestHandler name="/suggest" class="solr.SearchHandler" startup="lazy">
<lst name="defaults">
<str name="suggest">true</str>
<str name="suggest.count">5</str>
</lst>
<arr name="components">
<str>suggest</str>
<str>suggest1</str>
</arr>
</requestHandler>
出现问题:suggest: org.apache.solr.common.SolrException:org.apache.solr.common.SolrException: org.apache.lucene.store.LockObtainFailedException: Lock held by this virtual machine: /Users/xxx/develop/tools/solr-5.5.0/server/solr/suggest/data/analyzingInfixSuggesterIndexDir/write.lock
// Resolve where the suggester stores its index: the INDEX_PATH parameter when
// configured, otherwise the default. A relative path is anchored under the
// core's data directory.
String indexPath = params.get(INDEX_PATH) == null
? DEFAULT_INDEX_PATH
: params.get(INDEX_PATH).toString();
if (!new File(indexPath).isAbsolute()) {
indexPath = core.getDataDir() + File.separator + indexPath;
}
原文: http://brandnewuser.iteye.com/blog/2297834