/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.analysis.standard;


import java.io.IOException;
import java.io.Reader;

import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WordlistLoader;
import org.apache.lucene.analysis.en.EnglishAnalyzer;

/**
 * Filters {@link ClassicTokenizer} with {@link ClassicFilter}, {@link
 * LowerCaseFilter} and {@link StopFilter}, using a list of
 * English stop words.
 *
 * <p>ClassicAnalyzer was named StandardAnalyzer in Lucene versions prior to 3.1.
 * As of 3.1, {@link StandardAnalyzer} implements Unicode text segmentation,
 * as specified by UAX#29.
 *
 * @since 3.1
 */
public final class ClassicAnalyzer extends StopwordAnalyzerBase {
/** Default maximum allowed token length */
public static final int DEFAULT_MAX_TOKEN_LENGTH = 255;

private int maxTokenLength = DEFAULT_MAX_TOKEN_LENGTH;

/** An unmodifiable set containing some common English words that are usually not useful for searching. */
public static final CharArraySet STOP_WORDS_SET = EnglishAnalyzer.ENGLISH_STOP_WORDS_SET;

/**
 * Builds an analyzer with the given stop words.
 *
 * @param stopWords stop words
 */
public ClassicAnalyzer(CharArraySet stopWords) {
  super(stopWords);
}
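
// A minimal sketch of supplying a custom stop word set; the words below are
// arbitrary examples and java.util.Arrays is assumed to be imported:
//
//   CharArraySet stopWords = new CharArraySet(Arrays.asList("the", "of", "und"), true);
//   ClassicAnalyzer analyzer = new ClassicAnalyzer(stopWords);
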
/** Builds an analyzer with the default stop words ({@link #STOP_WORDS_SET}). */
public ClassicAnalyzer() {
  this(STOP_WORDS_SET);
}
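
// A minimal usage sketch of the default analyzer; the field name "body" and the
// sample text are arbitrary, CharTermAttribute
// (org.apache.lucene.analysis.tokenattributes.CharTermAttribute) is assumed to be
// imported, and the calling method is assumed to declare IOException:
//
//   try (ClassicAnalyzer analyzer = new ClassicAnalyzer();
//        TokenStream ts = analyzer.tokenStream("body", "The I.B.M. engineer's report")) {
//     CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
//     ts.reset();
//     while (ts.incrementToken()) {
//       System.out.println(term.toString()); // roughly: ibm, engineer, report
//     }
//     ts.end();
//   }
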
/**
 * Builds an analyzer with the stop words from the given reader.
 *
 * @see WordlistLoader#getWordSet(Reader)
 * @param stopwords Reader to read stop words from
 */
public ClassicAnalyzer(Reader stopwords) throws IOException {
  this(loadStopwordSet(stopwords));
}
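
// A minimal sketch of loading stop words from a Reader, one word per line; a
// StringReader (java.io.StringReader) stands in for a stop word file, and the
// constructor declares IOException:
//
//   ClassicAnalyzer analyzer = new ClassicAnalyzer(new StringReader("the\nof\nand"));
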
/**
 * Set the maximum allowed token length. If a token is seen
 * that exceeds this length then it is discarded. This
 * setting only takes effect the next time {@code tokenStream}
 * is called.
 */
public void setMaxTokenLength(int length) {
  maxTokenLength = length;
}
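
// A short sketch of raising the limit before analysis; 1024 is an arbitrary example
// value, the field name is again arbitrary, and "longText" is a hypothetical String
// variable:
//
//   ClassicAnalyzer analyzer = new ClassicAnalyzer();
//   analyzer.setMaxTokenLength(1024); // tokens longer than 1024 chars are discarded
//   TokenStream ts = analyzer.tokenStream("body", longText);
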
/** @see #setMaxTokenLength */
public int getMaxTokenLength() {
  return maxTokenLength;
}

@Override
protected TokenStreamComponents createComponents(final String fieldName) {
  final ClassicTokenizer src = new ClassicTokenizer();
  src.setMaxTokenLength(maxTokenLength);
  // ClassicFilter strips trailing possessive 's and removes dots from acronyms
  TokenStream tok = new ClassicFilter(src);
  tok = new LowerCaseFilter(tok);
  tok = new StopFilter(tok, stopwords);
  return new TokenStreamComponents(
      r -> {
        src.setMaxTokenLength(ClassicAnalyzer.this.maxTokenLength);
        src.setReader(r);
      },
      tok);
}

@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
  return new LowerCaseFilter(in);
}
}