diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
index 8ae425af4e..923a9f115b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
@@ -433,13 +433,10 @@ public override void End()
this.offsetAtt.SetOffset(finalOffset, finalOffset);
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- base.Dispose(disposing);
- if (disposing)
- {
- this.initialized = false;
- }
+ base.Close();
+ this.initialized = false;
}
public override void Reset()
@@ -570,13 +567,10 @@ private bool IsStopWord(string text)
return stopWords != null && stopWords.Contains(text);
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- base.Dispose(disposing);
- if (disposing)
- {
- this.str = null;
- }
+ base.Close();
+ this.str = null;
}
public override void Reset()
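The two hunks above establish the pattern applied throughout this change: per-stream cleanup moves out of the Dispose(bool) pattern and into a Java-style Close() override that chains to the base class. A minimal sketch of the convention, using a hypothetical filter (class and field names here are illustrative, not part of this patch):

    using Lucene.Net.Analysis;

    public sealed class StatefulFilter : TokenFilter
    {
        private bool initialized = true;

        public StatefulFilter(TokenStream input) : base(input) { }

        public override bool IncrementToken()
            => initialized && m_input.IncrementToken();

        public override void Close()
        {
            base.Close();             // closes the wrapped input stream
            this.initialized = false; // clear per-use state, as PatternAnalyzer does above
        }
    }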
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
index 67f22a9591..69c9e93802 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Miscellaneous
///
/// Links two .
///
- /// NOTE: This filter might not behave correctly if used with custom
+ /// NOTE: This filter might not behave correctly if used with custom
/// s, i.e. s other than
- /// the ones located in Lucene.Net.Analysis.TokenAttributes.
+ /// the ones located in Lucene.Net.Analysis.TokenAttributes.
///
public class PrefixAndSuffixAwareTokenFilter : TokenStream
{
private readonly PrefixAwareTokenFilter suffix;
- public PrefixAndSuffixAwareTokenFilter(TokenStream prefix, TokenStream input, TokenStream suffix)
+ public PrefixAndSuffixAwareTokenFilter(TokenStream prefix, TokenStream input, TokenStream suffix)
: base(suffix)
{
prefix = new PrefixAwareTokenFilterAnonymousClass(this, prefix, input);
@@ -40,7 +40,7 @@ private sealed class PrefixAwareTokenFilterAnonymousClass : PrefixAwareTokenFilt
{
private readonly PrefixAndSuffixAwareTokenFilter outerInstance;
- public PrefixAwareTokenFilterAnonymousClass(PrefixAndSuffixAwareTokenFilter outerInstance, TokenStream prefix, TokenStream input)
+ public PrefixAwareTokenFilterAnonymousClass(PrefixAndSuffixAwareTokenFilter outerInstance, TokenStream prefix, TokenStream input)
: base(prefix, input)
{
this.outerInstance = outerInstance;
@@ -56,7 +56,7 @@ private sealed class PrefixAwareTokenFilterAnonymousClass2 : PrefixAwareTokenFil
{
private readonly PrefixAndSuffixAwareTokenFilter outerInstance;
- public PrefixAwareTokenFilterAnonymousClass2(PrefixAndSuffixAwareTokenFilter outerInstance, TokenStream prefix, TokenStream suffix)
+ public PrefixAwareTokenFilterAnonymousClass2(PrefixAndSuffixAwareTokenFilter outerInstance, TokenStream prefix, TokenStream suffix)
: base(prefix, suffix)
{
this.outerInstance = outerInstance;
@@ -90,13 +90,9 @@ public override void Reset()
suffix.Reset();
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- if (disposing)
- {
- suffix.Dispose();
- }
- base.Dispose(disposing); // LUCENENET specific - disposable pattern requires calling the base class implementation
+ suffix.Close();
}
public override void End()
@@ -104,4 +100,4 @@ public override void End()
suffix.End();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAwareTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAwareTokenFilter.cs
index 118f67b03e..0395f4cd29 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAwareTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAwareTokenFilter.cs
@@ -24,10 +24,10 @@ namespace Lucene.Net.Analysis.Miscellaneous
///
/// Joins two token streams and leaves the last token of the first stream available
/// to be used when updating the token values in the second stream based on that token.
- ///
+ ///
/// The default implementation adds last prefix token end offset to the suffix token start and end offsets.
///
- /// NOTE: This filter might not behave correctly if used with custom
+ /// NOTE: This filter might not behave correctly if used with custom
/// s, i.e. s other than
/// the ones located in Lucene.Net.Analysis.TokenAttributes.
///
@@ -175,14 +175,10 @@ public override void End()
suffix.End();
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- if (disposing)
- {
- prefix.Dispose();
- suffix.Dispose();
- }
- base.Dispose(disposing); // LUCENENET specific - disposable pattern requires calling the base class implementation
+ prefix.Close();
+ suffix.Close();
}
public override void Reset()
@@ -211,4 +207,4 @@ public virtual TokenStream Suffix
set => this.suffix = value;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
index 5e8d983976..5006aa1ce4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
@@ -25,22 +25,22 @@ namespace Lucene.Net.Analysis.Standard
///
/// A grammar-based tokenizer constructed with JFlex (and then ported to .NET)
- ///
+ ///
/// This should be a good tokenizer for most European-language documents:
- ///
+ ///
///
- /// - Splits words at punctuation characters, removing punctuation. However, a
+ /// - Splits words at punctuation characters, removing punctuation. However, a
/// dot that's not followed by whitespace is considered part of a token.
/// - Splits words at hyphens, unless there's a number in the token, in which case
/// the whole token is interpreted as a product number and is not split.
/// - Recognizes email addresses and internet hostnames as one token.
///
- ///
+ ///
///
/// Many applications have specific tokenizer needs. If this tokenizer does
/// not suit your application, please consider copying this source code
/// directory to your project and maintaining your own grammar-based tokenizer.
- ///
+ ///
/// was named in Lucene versions prior to 3.1.
/// As of 3.1, implements Unicode text segmentation,
/// as specified by UAX#29.
@@ -83,7 +83,7 @@ public sealed class ClassicTokenizer : Tokenizer
///
/// Set the max allowed token length. Any token longer
- /// than this is skipped.
+ /// than this is skipped.
///
public int MaxTokenLength
{
@@ -103,7 +103,7 @@ public int MaxTokenLength
///
/// lucene compatibility version
/// The input reader
- ///
+ ///
/// See http://issues.apache.org/jira/browse/LUCENE-1068
public ClassicTokenizer(LuceneVersion matchVersion, Reader input)
: base(input)
@@ -112,7 +112,7 @@ public ClassicTokenizer(LuceneVersion matchVersion, Reader input)
}
///
- /// Creates a new with a given
+ /// Creates a new with a given
///
public ClassicTokenizer(LuceneVersion matchVersion, AttributeFactory factory, Reader input)
: base(factory, input)
@@ -135,7 +135,7 @@ private void Init(LuceneVersion matchVersion)
private IOffsetAttribute offsetAtt;
private IPositionIncrementAttribute posIncrAtt;
private ITypeAttribute typeAtt;
-
+
/*
* (non-Javadoc)
*
@@ -193,13 +193,10 @@ public override sealed void End()
posIncrAtt.PositionIncrement = posIncrAtt.PositionIncrement + skippedPositions;
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- base.Dispose(disposing);
- if (disposing)
- {
- scanner.YyReset(m_input);
- }
+ base.Close();
+ scanner.YyReset(m_input);
}
public override void Reset()
@@ -209,4 +206,4 @@ public override void Reset()
skippedPositions = 0;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
index 30d6abd040..c33554f398 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
@@ -30,14 +30,14 @@ namespace Lucene.Net.Analysis.Standard
/// A grammar-based tokenizer constructed with JFlex.
///
/// As of Lucene version 3.1, this class implements the Word Break rules from the
- /// Unicode Text Segmentation algorithm, as specified in
+ /// Unicode Text Segmentation algorithm, as specified in
/// Unicode Standard Annex #29.
///
///
/// Many applications have specific tokenizer needs. If this tokenizer does
/// not suit your application, please consider copying this source code
/// directory to your project and maintaining your own grammar-based tokenizer.
- ///
+ ///
///
/// You must specify the required
/// compatibility when creating :
@@ -58,25 +58,25 @@ public sealed class StandardTokenizer : Tokenizer
private IStandardTokenizerInterface scanner;
public const int ALPHANUM = 0;
- /// @deprecated (3.1)
+ /// @deprecated (3.1)
[Obsolete("(3.1)")]
public const int APOSTROPHE = 1;
- /// @deprecated (3.1)
+ /// @deprecated (3.1)
[Obsolete("(3.1)")]
public const int ACRONYM = 2;
- /// @deprecated (3.1)
+ /// @deprecated (3.1)
[Obsolete("(3.1)")]
public const int COMPANY = 3;
public const int EMAIL = 4;
- /// @deprecated (3.1)
+ /// @deprecated (3.1)
[Obsolete("(3.1)")]
public const int HOST = 5;
public const int NUM = 6;
- /// @deprecated (3.1)
+ /// @deprecated (3.1)
[Obsolete("(3.1)")]
public const int CJ = 7;
- /// @deprecated (3.1)
+ /// @deprecated (3.1)
[Obsolete("(3.1)")]
public const int ACRONYM_DEP = 8;
@@ -111,7 +111,7 @@ public sealed class StandardTokenizer : Tokenizer
///
/// Set the max allowed token length. Any token longer
- /// than this is skipped.
+ /// than this is skipped.
///
public int MaxTokenLength
{
@@ -133,7 +133,7 @@ public int MaxTokenLength
///
/// Lucene compatibility version - See
/// The input reader
- ///
+ ///
/// See http://issues.apache.org/jira/browse/LUCENE-1068
public StandardTokenizer(LuceneVersion matchVersion, TextReader input)
: base(input)
@@ -142,7 +142,7 @@ public StandardTokenizer(LuceneVersion matchVersion, TextReader input)
}
///
- /// Creates a new with a given
+ /// Creates a new with a given
///
public StandardTokenizer(LuceneVersion matchVersion, AttributeFactory factory, TextReader input)
: base(factory, input)
@@ -248,13 +248,10 @@ public override sealed void End()
posIncrAtt.PositionIncrement = posIncrAtt.PositionIncrement + skippedPositions;
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- base.Dispose(disposing);
- if (disposing)
- {
- scanner.YyReset(m_input);
- }
+ base.Close();
+ scanner.YyReset(m_input);
}
public override void Reset()
@@ -264,4 +261,4 @@ public override void Reset()
skippedPositions = 0;
}
}
-}
\ No newline at end of file
+}
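For tokenizers, Close() also re-arms the JFlex scanner (scanner.YyReset(m_input)) so the same instance can be fed a new reader afterwards. A hedged sketch of the reuse lifecycle this enables, following the standard TokenStream contract (consume, End, Close, then SetReader and consume again):

    using System.IO;
    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Util;

    var tokenizer = new StandardTokenizer(LuceneVersion.LUCENE_48, new StringReader("first text"));

    tokenizer.Reset();
    while (tokenizer.IncrementToken()) { /* first pass */ }
    tokenizer.End();
    tokenizer.Close();                                    // scanner reset per the hunk above

    tokenizer.SetReader(new StringReader("second text")); // legal only after Close()
    tokenizer.Reset();
    while (tokenizer.IncrementToken()) { /* same instance, new input */ }
    tokenizer.End();
    tokenizer.Close();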
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
index efb952f336..2081b0c5b7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
@@ -28,9 +28,9 @@ namespace Lucene.Net.Analysis.Standard
*/
///
- /// This class implements Word Break rules from the Unicode Text Segmentation
+ /// This class implements Word Break rules from the Unicode Text Segmentation
/// algorithm, as specified in
- /// Unicode Standard Annex #29
+ /// Unicode Standard Annex #29
/// URLs and email addresses are also tokenized according to the relevant RFCs.
///
/// Tokens produced are of the following types:
@@ -89,7 +89,7 @@ public sealed class UAX29URLEmailTokenizer : Tokenizer
///
/// Set the max allowed token length. Any token longer
- /// than this is skipped.
+ /// than this is skipped.
///
public int MaxTokenLength
{
@@ -214,13 +214,10 @@ public override sealed void End()
posIncrAtt.PositionIncrement = posIncrAtt.PositionIncrement + skippedPositions;
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- base.Dispose(disposing);
- if (disposing)
- {
- scanner.YyReset(m_input);
- }
+ base.Close();
+ scanner.YyReset(m_input);
}
public override void Reset()
@@ -230,4 +227,4 @@ public override void Reset()
skippedPositions = 0;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
index 4f3f66adee..89b1e7352c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
@@ -357,7 +357,9 @@ protected Parser(bool dedup, Analyzer analyzer) // LUCENENET: CA1012: Abstract t
///
public virtual CharsRef Analyze(string text, CharsRef reuse)
{
- using (TokenStream ts = analyzer.GetTokenStream("", text))
+ Exception priorException = null;
+ TokenStream ts = analyzer.GetTokenStream("", text);
+ try
{
var termAtt = ts.AddAttribute<ICharTermAttribute>();
var posIncAtt = ts.AddAttribute<IPositionIncrementAttribute>();
@@ -370,10 +372,12 @@ public virtual CharsRef Analyze(string text, CharsRef reuse)
{
throw new ArgumentException("term: " + text + " analyzed to a zero-length token");
}
+
if (posIncAtt.PositionIncrement != 1)
{
throw new ArgumentException("term: " + text + " analyzed to a token with posinc != 1");
}
+
reuse.Grow(reuse.Length + length + 1); // current + word + separator
int end = reuse.Offset + reuse.Length;
if (reuse.Length > 0)
@@ -381,11 +385,21 @@ public virtual CharsRef Analyze(string text, CharsRef reuse)
reuse.Chars[end++] = SynonymMap.WORD_SEPARATOR;
reuse.Length++;
}
+
Arrays.Copy(termAtt.Buffer, 0, reuse.Chars, end, length);
reuse.Length += length;
}
+
ts.End();
}
+ catch (Exception e) when (e.IsIOException())
+ {
+ priorException = e;
+ }
+ finally {
+ IOUtils.CloseWhileHandlingException(priorException, ts);
+ }
+
if (reuse.Length == 0)
{
throw new ArgumentException("term: " + text + " was completely eliminated by analyzer");
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs
index 82f7ac9186..2f0a703640 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs
@@ -319,13 +319,10 @@ private void SetupToken()
offsetAtt.SetOffset(CorrectOffset(start), CorrectOffset(start + termAtt.Length));
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- base.Dispose(disposing);
- if (disposing)
- {
- scanner.YyReset(m_input);
- }
+ base.Close();
+ scanner.YyReset(m_input);
}
///
diff --git a/src/Lucene.Net.Analysis.Kuromoji/JapaneseTokenizer.cs b/src/Lucene.Net.Analysis.Kuromoji/JapaneseTokenizer.cs
index edf76facd7..3b19ab5f3f 100644
--- a/src/Lucene.Net.Analysis.Kuromoji/JapaneseTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Kuromoji/JapaneseTokenizer.cs
@@ -233,13 +233,10 @@ public GraphvizFormatter GraphvizFormatter
set => this.dotOut = value;
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- base.Dispose(disposing);
- if (disposing)
- {
- buffer.Reset(m_input);
- }
+ base.Close();
+ buffer.Reset(m_input);
}
public override void Reset()
diff --git a/src/Lucene.Net.Analysis.OpenNLP/OpenNLPLemmatizerFilter.cs b/src/Lucene.Net.Analysis.OpenNLP/OpenNLPLemmatizerFilter.cs
index 2b03cd420d..5dc0254d7d 100644
--- a/src/Lucene.Net.Analysis.OpenNLP/OpenNLPLemmatizerFilter.cs
+++ b/src/Lucene.Net.Analysis.OpenNLP/OpenNLPLemmatizerFilter.cs
@@ -133,24 +133,14 @@ private void Clear()
/// Releases resources used by the and
/// if overridden in a derived class, optionally releases unmanaged resources.
///
- /// true to release both managed and unmanaged resources;
- /// false to release only unmanaged resources.
-
- // LUCENENET specific
- protected override void Dispose(bool disposing)
+ ///
+ /// LUCENENET specific
+ ///
+ public override void Close()
{
- try
- {
- if (disposing)
- {
- sentenceTokenAttrsIter?.Dispose();
- sentenceTokenAttrsIter = null;
- }
- }
- finally
- {
- base.Dispose(disposing);
- }
+ sentenceTokenAttrsIter?.Dispose();
+ sentenceTokenAttrsIter = null;
+ base.Close();
}
}
}
diff --git a/src/Lucene.Net.Analysis.OpenNLP/OpenNLPTokenizer.cs b/src/Lucene.Net.Analysis.OpenNLP/OpenNLPTokenizer.cs
index 14e6a0dfb8..b1b5481e6a 100644
--- a/src/Lucene.Net.Analysis.OpenNLP/OpenNLPTokenizer.cs
+++ b/src/Lucene.Net.Analysis.OpenNLP/OpenNLPTokenizer.cs
@@ -67,14 +67,11 @@ public sealed class OpenNLPTokenizer : SegmentingTokenizerBase
this.offsetAtt = AddAttribute<IOffsetAttribute>();
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- base.Dispose(disposing);
- if (disposing)
- {
- termSpans = null;
- termNum = sentenceStart = 0;
- }
+ base.Close();
+ termSpans = null;
+ termNum = sentenceStart = 0;
}
protected override void SetNextSentence(int sentenceStart, int sentenceEnd)
diff --git a/src/Lucene.Net.Analysis.SmartCn/HMMChineseTokenizer.cs b/src/Lucene.Net.Analysis.SmartCn/HMMChineseTokenizer.cs
index d76f44b417..89109d80bf 100644
--- a/src/Lucene.Net.Analysis.SmartCn/HMMChineseTokenizer.cs
+++ b/src/Lucene.Net.Analysis.SmartCn/HMMChineseTokenizer.cs
@@ -94,27 +94,16 @@ public override void Reset()
}
///
- /// Releases resources used by the and
- /// if overridden in a derived class, optionally releases unmanaged resources.
+ /// Releases resources used by the .
///
- /// true to release both managed and unmanaged resources;
- /// false to release only unmanaged resources.
-
- // LUCENENET specific
- protected override void Dispose(bool disposing)
+ ///
+ /// LUCENENET specific
+ ///
+ public override void Close()
{
- try
- {
- if (disposing)
- {
- tokens?.Dispose(); // LUCENENET specific - dispose tokens and set to null
- tokens = null;
- }
- }
- finally
- {
- base.Dispose(disposing);
- }
+ tokens?.Dispose(); // LUCENENET specific - dispose tokens and set to null
+ tokens = null;
+ base.Close();
}
}
}
diff --git a/src/Lucene.Net.Analysis.SmartCn/WordTokenFilter.cs b/src/Lucene.Net.Analysis.SmartCn/WordTokenFilter.cs
index 4965448fd2..74447fcf5a 100644
--- a/src/Lucene.Net.Analysis.SmartCn/WordTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.SmartCn/WordTokenFilter.cs
@@ -73,7 +73,7 @@ public override bool IncrementToken()
// a new sentence is available: process it.
tokenBuffer = wordSegmenter.SegmentSentence(termAtt.ToString(), offsetAtt.StartOffset);
tokenIter = tokenBuffer.GetEnumerator();
- /*
+ /*
* it should not be possible to have a sentence with 0 words, check just in case.
* returning EOS isn't the best either, but its the behavior of the original code.
*/
@@ -90,7 +90,7 @@ public override bool IncrementToken()
// WordTokenFilter must clear attributes, as it is creating new tokens.
ClearAttributes();
- // There are remaining tokens from the current sentence, return the next one.
+ // There are remaining tokens from the current sentence, return the next one.
SegToken nextWord = tokenIter.Current;
termAtt.CopyBuffer(nextWord.CharArray, 0, nextWord.CharArray.Length);
@@ -114,27 +114,16 @@ public override void Reset()
}
///
- /// Releases resources used by the and
- /// if overridden in a derived class, optionally releases unmanaged resources.
+ /// Releases resources used by the .
///
- /// true to release both managed and unmanaged resources;
- /// false to release only unmanaged resources.
-
- // LUCENENET specific
- protected override void Dispose(bool disposing)
+ ///
+ /// LUCENENET specific
+ ///
+ public override void Close()
{
- try
- {
- if (disposing)
- {
- tokenIter?.Dispose(); // LUCENENET specific - dispose tokenIter and set to null
- tokenIter = null;
- }
- }
- finally
- {
- base.Dispose(disposing);
- }
+ tokenIter?.Dispose(); // LUCENENET specific - dispose tokenIter and set to null
+ tokenIter = null;
+ base.Close();
}
}
}
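The OpenNLP and SmartCn filters above share a sub-pattern: Close() is also where IDisposable members (here, token enumerators) are disposed and nulled, so a closed instance cannot replay a stale iteration. A sketch with hypothetical names:

    using System.Collections.Generic;
    using Lucene.Net.Analysis;

    public sealed class BufferingFilter : TokenFilter
    {
        private IEnumerator<string> tokenIter; // refilled from m_input during IncrementToken

        public BufferingFilter(TokenStream input) : base(input) { }

        public override bool IncrementToken()
        {
            // ... drain tokenIter, refilling it from m_input as needed ...
            return false;
        }

        public override void Close()
        {
            tokenIter?.Dispose(); // managed member: still disposed, just from Close() now
            tokenIter = null;
            base.Close();
        }
    }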
diff --git a/src/Lucene.Net.Benchmark/ByTask/Tasks/ReadTokensTask.cs b/src/Lucene.Net.Benchmark/ByTask/Tasks/ReadTokensTask.cs
index b9dbb4eeab..584556df81 100644
--- a/src/Lucene.Net.Benchmark/ByTask/Tasks/ReadTokensTask.cs
+++ b/src/Lucene.Net.Benchmark/ByTask/Tasks/ReadTokensTask.cs
@@ -76,7 +76,7 @@ field is SingleField ||
continue;
}
- using TokenStream stream = field.GetTokenStream(analyzer);
+ TokenStream stream = field.GetTokenStream(analyzer);
// reset the TokenStream to the first token
stream.Reset();
@@ -87,6 +87,7 @@ field is SingleField ||
tokenCount++;
}
stream.End();
+ stream.Close();
}
totalTokenCount += tokenCount;
return tokenCount;
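Unlike most call sites in this patch, this benchmark hunk drops the using statement without adding a try/finally, so an exception from Reset() or IncrementToken() would leave the stream un-closed. If that mattered here, the guarded shape used elsewhere in the patch would look like this (sketch; Field is Lucene.Net.Documents.Field):

    private static long CountTokens(Field field, Analyzer analyzer)
    {
        long tokenCount = 0;
        TokenStream stream = field.GetTokenStream(analyzer);
        try
        {
            stream.Reset();
            while (stream.IncrementToken())
            {
                tokenCount++;
            }
            stream.End();
        }
        finally
        {
            IOUtils.CloseWhileHandlingException(stream);
        }
        return tokenCount;
    }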
diff --git a/src/Lucene.Net.Benchmark/ByTask/Tasks/SearchTravRetHighlightTask.cs b/src/Lucene.Net.Benchmark/ByTask/Tasks/SearchTravRetHighlightTask.cs
index 00eedddc18..e7cf36612c 100644
--- a/src/Lucene.Net.Benchmark/ByTask/Tasks/SearchTravRetHighlightTask.cs
+++ b/src/Lucene.Net.Benchmark/ByTask/Tasks/SearchTravRetHighlightTask.cs
@@ -38,7 +38,7 @@ namespace Lucene.Net.Benchmarks.ByTask.Tasks
/// Note: This task reuses the reader if it is already open.
/// Otherwise a reader is opened at start and closed at the end.
///
- /// Takes optional multivalued, comma separated param string as:
+ /// Takes optional multivalued, comma separated param string as:
///
/// size[<traversal size>],highlight[<int>],maxFrags[<int>],mergeContiguous[<boolean>],fields[name1;name2;...]
///
@@ -110,8 +110,7 @@ public BenchmarkHighlighterAnonymousClass(SearchTravRetHighlightTask outerInstan
public override int DoHighlight(IndexReader reader, int doc, string field, Document document, Analyzer analyzer, string text)
{
TokenStream ts = TokenSources.GetAnyTokenStream(reader, doc, field, document, analyzer);
- TextFragment[]
- frag = highlighter.GetBestTextFragments(ts, text, outerInstance.m_mergeContiguous, outerInstance.m_maxFrags);
+ TextFragment[] frag = highlighter.GetBestTextFragments(ts, text, outerInstance.m_mergeContiguous, outerInstance.m_maxFrags);
return frag != null ? frag.Length : 0;
}
}
diff --git a/src/Lucene.Net.Classification/SimpleNaiveBayesClassifier.cs b/src/Lucene.Net.Classification/SimpleNaiveBayesClassifier.cs
index b22dcd7d5c..3a50ae0a27 100644
--- a/src/Lucene.Net.Classification/SimpleNaiveBayesClassifier.cs
+++ b/src/Lucene.Net.Classification/SimpleNaiveBayesClassifier.cs
public class SimpleNaiveBayesClassifier : IClassifier<BytesRef>
/// classify any documents.
///
public SimpleNaiveBayesClassifier()
- {
+ {
}
///
@@ -58,7 +58,7 @@ public SimpleNaiveBayesClassifier()
/// the reader to use to access the Lucene index
/// the name of the field containing the class assigned to documents
/// the name of the field used to compare documents
- public virtual void Train(AtomicReader atomicReader, string textFieldName, string classFieldName, Analyzer analyzer)
+ public virtual void Train(AtomicReader atomicReader, string textFieldName, string classFieldName, Analyzer analyzer)
{
Train(atomicReader, textFieldName, classFieldName, analyzer, null);
}
@@ -91,17 +91,17 @@ public virtual void Train(AtomicReader atomicReader, string[] textFieldNames, st
docsWithClassSize = CountDocsWithClass();
}
- private int CountDocsWithClass()
+ private int CountDocsWithClass()
{
int docCount = MultiFields.GetTerms(atomicReader, classFieldName).DocCount;
- if (docCount == -1)
+ if (docCount == -1)
{ // in case codec doesn't support getDocCount
TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
BooleanQuery q = new BooleanQuery
{
new BooleanClause(new WildcardQuery(new Term(classFieldName, WildcardQuery.WILDCARD_STRING.ToString())), Occur.MUST)
};
- if (query != null)
+ if (query != null)
{
q.Add(query, Occur.MUST);
}
@@ -116,19 +116,19 @@ private string[] TokenizeDoc(string doc)
ICollection<string> result = new LinkedList<string>();
foreach (string textFieldName in textFieldNames) {
TokenStream tokenStream = analyzer.GetTokenStream(textFieldName, new StringReader(doc));
- try
+ try
{
ICharTermAttribute charTermAttribute = tokenStream.AddAttribute<ICharTermAttribute>();
tokenStream.Reset();
- while (tokenStream.IncrementToken())
+ while (tokenStream.IncrementToken())
{
result.Add(charTermAttribute.ToString());
}
tokenStream.End();
- }
- finally
+ }
+ finally
{
- IOUtils.DisposeWhileHandlingException(tokenStream);
+ IOUtils.CloseWhileHandlingException(tokenStream);
}
}
var ret = new string[result.Count];
@@ -141,9 +141,9 @@ private string[] TokenizeDoc(string doc)
///
/// a string containing text to be classified
/// a holding assigned class of type and score
- public virtual ClassificationResult<BytesRef> AssignClass(string inputDocument)
+ public virtual ClassificationResult<BytesRef> AssignClass(string inputDocument)
{
- if (atomicReader is null)
+ if (atomicReader is null)
{
throw new IOException("You must first call Classifier#train");
}
@@ -154,11 +154,11 @@ public virtual ClassificationResult AssignClass(string inputDocument)
TermsEnum termsEnum = terms.GetEnumerator();
BytesRef next;
string[] tokenizedDoc = TokenizeDoc(inputDocument);
- while (termsEnum.MoveNext())
+ while (termsEnum.MoveNext())
{
next = termsEnum.Term;
double clVal = CalculateLogPrior(next) + CalculateLogLikelihood(tokenizedDoc, next);
- if (clVal > max)
+ if (clVal > max)
{
max = clVal;
foundClass = BytesRef.DeepCopyOf(next);
@@ -173,7 +173,7 @@ private double CalculateLogLikelihood(string[] tokenizedDoc, BytesRef c)
{
// for each word
double result = 0d;
- foreach (string word in tokenizedDoc)
+ foreach (string word in tokenizedDoc)
{
// search with text:word AND class:c
int hits = GetWordFreqForClass(word, c);
@@ -196,7 +196,7 @@ private double CalculateLogLikelihood(string[] tokenizedDoc, BytesRef c)
private double GetTextTermFreqForClass(BytesRef c)
{
double avgNumberOfUniqueTerms = 0;
- foreach (string textFieldName in textFieldNames)
+ foreach (string textFieldName in textFieldNames)
{
Terms terms = MultiFields.GetTerms(atomicReader, textFieldName);
long numPostings = terms.SumDocFreq; // number of term/doc pairs
@@ -210,13 +210,13 @@ private int GetWordFreqForClass(string word, BytesRef c)
{
BooleanQuery booleanQuery = new BooleanQuery();
BooleanQuery subQuery = new BooleanQuery();
- foreach (string textFieldName in textFieldNames)
+ foreach (string textFieldName in textFieldNames)
{
subQuery.Add(new BooleanClause(new TermQuery(new Term(textFieldName, word)), Occur.SHOULD));
}
booleanQuery.Add(new BooleanClause(subQuery, Occur.MUST));
booleanQuery.Add(new BooleanClause(new TermQuery(new Term(classFieldName, c)), Occur.MUST));
- if (query != null)
+ if (query != null)
{
booleanQuery.Add(query, Occur.MUST);
}
@@ -230,9 +230,9 @@ private double CalculateLogPrior(BytesRef currentClass)
return Math.Log((double) DocCount(currentClass)) - Math.Log(docsWithClassSize);
}
- private int DocCount(BytesRef countedClass)
+ private int DocCount(BytesRef countedClass)
{
return atomicReader.DocFreq(new Term(classFieldName, countedClass));
}
- }
-}
\ No newline at end of file
+ }
+}
diff --git a/src/Lucene.Net.Facet/Taxonomy/Directory/DirectoryTaxonomyWriter.cs b/src/Lucene.Net.Facet/Taxonomy/Directory/DirectoryTaxonomyWriter.cs
index 857a30b96f..f061a868a0 100644
--- a/src/Lucene.Net.Facet/Taxonomy/Directory/DirectoryTaxonomyWriter.cs
+++ b/src/Lucene.Net.Facet/Taxonomy/Directory/DirectoryTaxonomyWriter.cs
@@ -435,7 +435,7 @@ private void CloseResources() // LUCENENET: Made private, since this has the sam
initializedReaderManager = false;
}
cache?.Dispose();
- parentStream.Dispose(); // LUCENENET specific
+ parentStream.Close(); // LUCENENET specific
}
finally
{
diff --git a/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs b/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs
index fae80ddc63..4d722a21ca 100644
--- a/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs
+++ b/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs
@@ -328,7 +328,7 @@ public TextFragment[] GetBestTextFragments(
try
{
tokenStream.End();
- tokenStream.Dispose();
+ tokenStream.Close();
}
catch (Exception e) when (e.IsException())
{
diff --git a/src/Lucene.Net.Highlighter/PostingsHighlight/MultiTermHighlighting.cs b/src/Lucene.Net.Highlighter/PostingsHighlight/MultiTermHighlighting.cs
index a336d53f70..d1d03047e0 100644
--- a/src/Lucene.Net.Highlighter/PostingsHighlight/MultiTermHighlighting.cs
+++ b/src/Lucene.Net.Highlighter/PostingsHighlight/MultiTermHighlighting.cs
@@ -37,7 +37,7 @@ namespace Lucene.Net.Search.PostingsHighlight
internal class MultiTermHighlighting
{
///
- /// Extracts all s for , and returns equivalent
+ /// Extracts all s for , and returns equivalent
/// automata that will match terms.
///
internal static CharacterRunAutomaton[] ExtractAutomata(Query query, string field)
@@ -279,7 +279,7 @@ public override int NextPosition()
}
}
stream.End();
- stream.Dispose();
+ stream.Close();
stream = null;
}
// exhausted
@@ -336,4 +336,4 @@ public override long GetCost()
}
}
}
-#endif
\ No newline at end of file
+#endif
diff --git a/src/Lucene.Net.Memory/MemoryIndex.cs b/src/Lucene.Net.Memory/MemoryIndex.cs
index b72da0eb9c..9770d04fd7 100644
--- a/src/Lucene.Net.Memory/MemoryIndex.cs
+++ b/src/Lucene.Net.Memory/MemoryIndex.cs
@@ -242,17 +242,26 @@ public virtual void AddField(string fieldName, string text, Analyzer analyzer)
throw new ArgumentNullException(nameof(analyzer), "analyzer must not be null"); // LUCENENET specific - changed from IllegalArgumentException to ArgumentNullException (.NET convention)
}
- TokenStream stream;
+ // LUCENENET specific: dispose of the TokenStream when done here, instead of in AddField
+ TokenStream stream = null;
try
{
- stream = analyzer.GetTokenStream(fieldName, text);
+ try
+ {
+ stream = analyzer.GetTokenStream(fieldName, text);
+ }
+ catch (Exception ex) when (ex.IsIOException())
+ {
+ throw RuntimeException.Create(ex);
+ }
+
+ AddField(fieldName, stream, 1.0f, analyzer.GetPositionIncrementGap(fieldName),
+ analyzer.GetOffsetGap(fieldName));
}
- catch (Exception ex) when (ex.IsIOException())
+ finally
{
- throw RuntimeException.Create(ex);
+ stream?.Close();
}
-
- AddField(fieldName, stream, 1.0f, analyzer.GetPositionIncrementGap(fieldName), analyzer.GetOffsetGap(fieldName));
}
///
@@ -311,28 +320,11 @@ public override bool IncrementToken()
return true;
}
- ///
- /// Releases resources used by the and
- /// if overridden in a derived class, optionally releases unmanaged resources.
- ///
- /// true to release both managed and unmanaged resources;
- /// false to release only unmanaged resources.
-
- // LUCENENET specific
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- try
- {
- if (disposing)
- {
- iter?.Dispose(); // LUCENENET specific - dispose iter and set to null
- iter = null;
- }
- }
- finally
- {
- base.Dispose(disposing);
- }
+ iter?.Dispose(); // LUCENENET specific - dispose iter and set to null, can't be reused
+ iter = null;
+ base.Close();
}
}
@@ -509,7 +501,7 @@ public virtual void AddField(string fieldName, TokenStream stream, float boost,
{
if (stream != null)
{
- stream.Dispose();
+ stream.Close();
}
}
catch (Exception e2) when (e2.IsIOException())
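The AddField rewrite above also changes ownership: the string overload now creates, consumes, and closes the TokenStream itself (the outer finally runs stream?.Close() even when GetTokenStream throws and is rethrown as a RuntimeException). Hypothetical caller, showing that nothing is left for the caller to dispose:

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Index.Memory;
    using Lucene.Net.Util;

    var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
    var index = new MemoryIndex();

    // Stream lifetime is handled inside AddField after this change.
    index.AddField("body", "some content to make searchable", analyzer);

    float score = index.Search(new Lucene.Net.Search.TermQuery(
        new Lucene.Net.Index.Term("body", "content")));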
diff --git a/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs b/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
index b4e5528f68..e3a9e1f90e 100644
--- a/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
+++ b/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
@@ -77,11 +77,11 @@ namespace Lucene.Net.Queries.Mlt
///
/// IndexReader ir = ...
/// IndexSearcher is = ...
- ///
+ ///
/// MoreLikeThis mlt = new MoreLikeThis(ir);
/// TextReader target = ... // orig source of doc you want to find similarities to
/// Query query = mlt.Like(target);
- ///
+ ///
/// Hits hits = is.Search(query);
/// // now the usual iteration thru 'hits' - the only thing to watch for is to make sure
/// //you ignore the doc if it matches your 'target' document, as it should be similar to itself
@@ -192,7 +192,7 @@ public sealed class MoreLikeThis
public static readonly int DEFAULT_MAX_QUERY_TERMS = 25;
// LUCNENENET NOTE: The following fields were made into auto-implemented properties:
- // analyzer, minTermFreq, minDocFreq, maxDocFreq, boost,
+ // analyzer, minTermFreq, minDocFreq, maxDocFreq, boost,
// fieldNames, maxNumTokensParsed, minWordLen, maxWordLen,
// maxQueryTerms, similarity
@@ -250,7 +250,7 @@ public MoreLikeThis(IndexReader ir, TFIDFSimilarity sim)
///
/// Gets or Sets an analyzer that will be used to parse source doc with. The default analyzer
- /// is not set. An analyzer is not required for generating a query with the
+ /// is not set. An analyzer is not required for generating a query with the
/// method, all other 'like' methods require an analyzer.
///
public Analyzer Analyzer { get; set; }
@@ -299,7 +299,7 @@ public void SetMaxDocFreqPct(int maxPercentage)
///
/// Gets or Sets the field names that will be used when generating the 'More Like This' query.
- /// The default field names that will be used is .
+ /// The default field names that will be used is .
/// Set this to null for the field names to be determined at runtime from the
/// provided in the constructor.
///
@@ -617,7 +617,7 @@ private void AddTermFrequencies(TextReader r, IDictionary termFre
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
@@ -790,4 +790,4 @@ internal ScoreTerm(string word, string topField, float score, float idf, int doc
///
public int Tf { get; private set; }
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.QueryParser/Analyzing/AnalyzingQueryParser.cs b/src/Lucene.Net.QueryParser/Analyzing/AnalyzingQueryParser.cs
index c5a4954c0e..5db599ac2f 100644
--- a/src/Lucene.Net.QueryParser/Analyzing/AnalyzingQueryParser.cs
+++ b/src/Lucene.Net.QueryParser/Analyzing/AnalyzingQueryParser.cs
@@ -39,14 +39,14 @@ namespace Lucene.Net.QueryParsers.Analyzing
/// ? don't get removed from the search terms.
///
/// Warning: This class should only be used with analyzers that do not use stopwords
- /// or that add tokens. Also, several stemming analyzers are inappropriate: for example,
- /// will turn Häuser into hau, but H?user will
+ /// or that add tokens. Also, several stemming analyzers are inappropriate: for example,
+ /// will turn Häuser into hau, but H?user will
/// become h?user when using this parser and thus no match would be found (i.e.
- /// using this parser will be no improvement over QueryParser in such cases).
+ /// using this parser will be no improvement over QueryParser in such cases).
///
public class AnalyzingQueryParser : Classic.QueryParser
{
- // gobble escaped chars or find a wildcard character
+ // gobble escaped chars or find a wildcard character
private readonly Regex wildcardPattern = new Regex(@"(\\.)|([?*]+)", RegexOptions.Compiled);
public AnalyzingQueryParser(LuceneVersion matchVersion, string field, Analyzer analyzer)
@@ -158,7 +158,7 @@ protected internal override Query GetFuzzyQuery(string field, string termStr, fl
///
/// Returns the analyzed form for the given chunk.
- ///
+ ///
/// If the analyzer produces more than one output token from the given chunk,
/// a ParseException is thrown.
///
@@ -219,7 +219,7 @@ protected internal virtual string AnalyzeSingleChunk(string field, string termSt
}
finally
{
- IOUtils.DisposeWhileHandlingException(stream);
+ IOUtils.CloseWhileHandlingException(stream);
}
return analyzed;
}
diff --git a/src/Lucene.Net.QueryParser/Classic/QueryParserBase.cs b/src/Lucene.Net.QueryParser/Classic/QueryParserBase.cs
index eff5d5c4f5..cb2a49b695 100644
--- a/src/Lucene.Net.QueryParser/Classic/QueryParserBase.cs
+++ b/src/Lucene.Net.QueryParser/Classic/QueryParserBase.cs
@@ -623,7 +623,7 @@ protected internal virtual BytesRef AnalyzeMultitermTerm(string field, string pa
}
finally
{
- IOUtils.DisposeWhileHandlingException(source);
+ IOUtils.CloseWhileHandlingException(source);
}
}
diff --git a/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/AnalyzerQueryNodeProcessor.cs b/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/AnalyzerQueryNodeProcessor.cs
index e357b50af5..d1d286b161 100644
--- a/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/AnalyzerQueryNodeProcessor.cs
+++ b/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/AnalyzerQueryNodeProcessor.cs
@@ -153,7 +153,7 @@ protected override IQueryNode PostProcessNode(IQueryNode node)
}
finally
{
- IOUtils.DisposeWhileHandlingException(source);
+ IOUtils.CloseWhileHandlingException(source);
}
// rewind the buffer stream
diff --git a/src/Lucene.Net.QueryParser/Xml/Builders/LikeThisQueryBuilder.cs b/src/Lucene.Net.QueryParser/Xml/Builders/LikeThisQueryBuilder.cs
index de2736e0cb..972f9c3c32 100644
--- a/src/Lucene.Net.QueryParser/Xml/Builders/LikeThisQueryBuilder.cs
+++ b/src/Lucene.Net.QueryParser/Xml/Builders/LikeThisQueryBuilder.cs
@@ -94,7 +94,7 @@ public virtual Query GetQuery(XmlElement e)
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
}
diff --git a/src/Lucene.Net.QueryParser/Xml/Builders/SpanOrTermsBuilder.cs b/src/Lucene.Net.QueryParser/Xml/Builders/SpanOrTermsBuilder.cs
index 3e71751924..7ed070b634 100644
--- a/src/Lucene.Net.QueryParser/Xml/Builders/SpanOrTermsBuilder.cs
+++ b/src/Lucene.Net.QueryParser/Xml/Builders/SpanOrTermsBuilder.cs
@@ -69,7 +69,7 @@ public override SpanQuery GetSpanQuery(XmlElement e)
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
}
diff --git a/src/Lucene.Net.QueryParser/Xml/Builders/TermsFilterBuilder.cs b/src/Lucene.Net.QueryParser/Xml/Builders/TermsFilterBuilder.cs
index 7ee0b46f23..8a71cb0ead 100644
--- a/src/Lucene.Net.QueryParser/Xml/Builders/TermsFilterBuilder.cs
+++ b/src/Lucene.Net.QueryParser/Xml/Builders/TermsFilterBuilder.cs
@@ -69,7 +69,7 @@ public virtual Filter GetFilter(XmlElement e)
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
return new TermsFilter(fieldName, terms);
}
diff --git a/src/Lucene.Net.QueryParser/Xml/Builders/TermsQueryBuilder.cs b/src/Lucene.Net.QueryParser/Xml/Builders/TermsQueryBuilder.cs
index 326cae490d..0d79ee7e2e 100644
--- a/src/Lucene.Net.QueryParser/Xml/Builders/TermsQueryBuilder.cs
+++ b/src/Lucene.Net.QueryParser/Xml/Builders/TermsQueryBuilder.cs
@@ -67,7 +67,7 @@ public virtual Query GetQuery(XmlElement e)
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
bq.Boost = DOMUtils.GetAttribute(e, "boost", 1.0f);
diff --git a/src/Lucene.Net.Sandbox/Queries/FuzzyLikeThisQuery.cs b/src/Lucene.Net.Sandbox/Queries/FuzzyLikeThisQuery.cs
index 3c5da3287c..7f66784fef 100644
--- a/src/Lucene.Net.Sandbox/Queries/FuzzyLikeThisQuery.cs
+++ b/src/Lucene.Net.Sandbox/Queries/FuzzyLikeThisQuery.cs
@@ -267,7 +267,7 @@ private void AddTerms(IndexReader reader, FieldVals f)
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
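The run of QueryParser and Sandbox hunks above is a mechanical rename: IOUtils.DisposeWhileHandlingException becomes IOUtils.CloseWhileHandlingException. The single-argument form suppresses only exceptions thrown by the close itself; failures in the try block still propagate. Sketched against an assumed analyzer:

    private static string FirstToken(Analyzer analyzer, string field, string part)
    {
        TokenStream source = analyzer.GetTokenStream(field, new StringReader(part));
        try
        {
            var termAtt = source.AddAttribute<ICharTermAttribute>();
            source.Reset();
            string result = source.IncrementToken() ? termAtt.ToString() : null;
            source.End();
            return result;
        }
        finally
        {
            IOUtils.CloseWhileHandlingException(source); // only close-time errors are suppressed
        }
    }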
diff --git a/src/Lucene.Net.Spatial/Prefix/PrefixTreeStrategy.cs b/src/Lucene.Net.Spatial/Prefix/PrefixTreeStrategy.cs
index c01a743f19..2b91de964e 100644
--- a/src/Lucene.Net.Spatial/Prefix/PrefixTreeStrategy.cs
+++ b/src/Lucene.Net.Spatial/Prefix/PrefixTreeStrategy.cs
@@ -35,7 +35,7 @@ namespace Lucene.Net.Spatial.Prefix
/// subclasses are RecursivePrefixTreeStrategy and
/// TermQueryPrefixTreeStrategy. This strategy is most effective as a fast
/// approximate spatial search filter.
- ///
+ ///
/// Characteristics:
///
/// - Can index any shape; however only
@@ -60,7 +60,7 @@ namespace Lucene.Net.Spatial.Prefix
/// it doesn't scale to large numbers of points nor is it real-time-search
/// friendly.
///
- ///
+ ///
/// Implementation:
/// The
/// does most of the work, for example returning
@@ -197,27 +197,10 @@ public override bool IncrementToken()
return false;
}
- ///
- /// Releases resources used by the and
- /// if overridden in a derived class, optionally releases unmanaged resources.
- ///
- /// true to release both managed and unmanaged resources;
- /// false to release only unmanaged resources.
-
- // LUCENENET specific
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- try
- {
- if (disposing)
- {
- iter.Dispose(); // LUCENENET specific - dispose iter
- }
- }
- finally
- {
- base.Dispose(disposing);
- }
+ iter.Dispose();
+ base.Close();
}
}
diff --git a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggester.cs b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggester.cs
index 48e42b4778..b897ebcf32 100644
--- a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggester.cs
+++ b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggester.cs
@@ -45,19 +45,19 @@ namespace Lucene.Net.Search.Suggest.Analyzing
/// Analyzes the input text and then suggests matches based
/// on prefix matches to any tokens in the indexed text.
/// This also highlights the tokens that match.
- ///
+ ///
/// This suggester supports payloads. Matches are sorted only
/// by the suggest weight; it would be nice to support
/// blended score + weight sort in the future. This means
/// this suggester best applies when there is a strong
/// a-priori ranking of all the suggestions.
- ///
+ ///
///
/// This suggester supports contexts, however the
/// contexts must be valid utf8 (arbitrary binary terms will
/// not work).
- ///
- /// @lucene.experimental
+ ///
+ /// @lucene.experimental
///
///
@@ -71,13 +71,13 @@ public class AnalyzingInfixSuggester : Lookup, IDisposable
///
/// Field name used for the indexed text, as a
- /// , for exact lookup.
+ /// , for exact lookup.
///
protected const string EXACT_TEXT_FIELD_NAME = "exacttext";
///
/// Field name used for the indexed context, as a
- /// and a , for filtering.
+ /// and a , for filtering.
///
protected const string CONTEXTS_FIELD_NAME = "contexts";
@@ -104,7 +104,7 @@ public class AnalyzingInfixSuggester : Lookup, IDisposable
///
/// Default minimum number of leading characters before
- /// PrefixQuery is used (4).
+ /// PrefixQuery is used (4).
///
public const int DEFAULT_MIN_PREFIX_CHARS = 4;
@@ -114,11 +114,11 @@ public class AnalyzingInfixSuggester : Lookup, IDisposable
///
/// Create a new instance, loading from a previously built
- /// directory, if it exists.
+ /// directory, if it exists.
/// This directory must be
/// private to the infix suggester (i.e., not an external
/// Lucene index). Note that
- /// will also dispose the provided directory.
+ /// will also dispose the provided directory.
///
public AnalyzingInfixSuggester(LuceneVersion matchVersion, Directory dir, Analyzer analyzer)
: this(matchVersion, dir, analyzer, analyzer, DEFAULT_MIN_PREFIX_CHARS)
@@ -212,13 +212,13 @@ public AnalyzingInfixSuggester(IAnalyzingInfixSuggesterIndexWriterConfigFactory
}
}
- /// LUCENENET specific - moved IndexWriterConfig GetIndexWriterConfig to
+ /// LUCENENET specific - moved IndexWriterConfig GetIndexWriterConfig to
/// class
/// to allow for customizing the index writer config.
///
- /// Subclass can override to choose a specific
- /// implementation.
+ /// Subclass can override to choose a specific
+ /// implementation.
///
protected internal virtual Directory GetDirectory(DirectoryInfo path)
{
@@ -365,11 +365,11 @@ private void EnsureOpen()
/// instead if you want to replace a previous suggestion.
/// After adding or updating a batch of new suggestions,
/// you must call in the end in order to
- /// see the suggestions in
+ /// see the suggestions in
///
public virtual void Add(BytesRef text, IEnumerable<BytesRef> contexts, long weight, BytesRef payload)
{
- EnsureOpen(); //LUCENENET specific -Support for LUCENE - 5889.
+ EnsureOpen(); //LUCENENET specific -Support for LUCENE - 5889.
writer.AddDocument(BuildDocument(text, contexts, weight, payload));
}
@@ -377,10 +377,10 @@ public virtual void Add(BytesRef text, IEnumerable contexts, long weig
/// Updates a previous suggestion, matching the exact same
/// text as before. Use this to change the weight or
/// payload of an already added suggstion. If you know
- /// this text is not already present you can use
+ /// this text is not already present you can use
/// instead. After adding or updating a batch of
/// new suggestions, you must call in the
- /// end in order to see the suggestions in
+ /// end in order to see the suggestions in
///
public virtual void Update(BytesRef text, IEnumerable<BytesRef> contexts, long weight, BytesRef payload)
{
@@ -419,7 +419,7 @@ private Document BuildDocument(BytesRef text, IEnumerable contexts, lo
///
/// Reopens the underlying searcher; it's best to "batch
/// up" many additions/updates, and then call refresh
- /// once in the end.
+ /// once in the end.
///
public virtual void Refresh()
{
@@ -459,7 +459,7 @@ public virtual IList DoLookup(string key, int num, bool allTermsRe
///
/// This is called if the last token isn't ended
/// (e.g. user did not type a space after it). Return an
- /// appropriate clause to add to the .
+ /// appropriate clause to add to the .
///
protected internal virtual Query GetLastTokenQuery(string token)
{
@@ -475,7 +475,7 @@ protected internal virtual Query GetLastTokenQuery(string token)
///
/// Retrieve suggestions, specifying whether all terms
/// must match () and whether the hits
- /// should be highlighted ().
+ /// should be highlighted ().
///
public virtual IList<LookupResult> DoLookup(string key, IEnumerable<BytesRef> contexts, int num, bool allTermsRequired, bool doHighlight)
{
@@ -577,7 +577,7 @@ public virtual IList DoLookup(string key, IEnumerable co
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
// TODO: we could allow blended sort here, combining
@@ -692,7 +692,7 @@ protected internal virtual IList CreateResults(IndexSearcher searc
///
/// Subclass can override this to tweak the Query before
- /// searching.
+ /// searching.
///
protected internal virtual Query FinishQuery(BooleanQuery bq, bool allTermsRequired)
{
@@ -703,7 +703,7 @@ protected internal virtual Query FinishQuery(BooleanQuery bq, bool allTermsRequi
/// Override this method to customize the Object
/// representing a single highlighted suggestions; the
/// result is set on each
- /// member.
+ /// member.
///
protected internal virtual object Highlight(string text, ICollection<string> matchedTokens, string prefixToken)
{
@@ -752,7 +752,7 @@ protected internal virtual object Highlight(string text, ICollection mat
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
diff --git a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs
index d36076977d..985ce415ca 100644
--- a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs
+++ b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs
@@ -37,17 +37,17 @@ namespace Lucene.Net.Search.Suggest.Analyzing
/// thing at lookup time. This means lookup is based on the
/// analyzed form while suggestions are still the surface
/// form(s).
- ///
+ ///
///
/// This can result in powerful suggester functionality. For
- /// example, if you use an analyzer removing stop words,
+ /// example, if you use an analyzer removing stop words,
/// then the partial text "ghost chr..." could see the
/// suggestion "The Ghost of Christmas Past". Note that
/// position increments MUST NOT be preserved for this example
- /// to work, so you should call the constructor with
- /// parameter set to
+ /// to work, so you should call the constructor with
+ /// parameter set to
/// false
- ///
+ ///
///
///
/// If SynonymFilter is used to map wifi and wireless network to
@@ -55,35 +55,35 @@ namespace Lucene.Net.Search.Suggest.Analyzing
/// "wifi router". Token normalization like stemmers, accent
/// removal, etc., would allow suggestions to ignore such
/// variations.
- ///
+ ///
///
///
/// When two matching suggestions have the same weight, they
/// are tie-broken by the analyzed form. If their analyzed
/// form is the same then the order is undefined.
- ///
+ ///
///
///
/// There are some limitations:
///
- ///
+ ///
/// - A lookup from a query like "net" in English won't
/// be any different than "net " (ie, user added a
/// trailing space) because analyzers don't reflect
/// when they've seen a token separator and when they
/// haven't.
- ///
+ ///
/// - If you're using , and the user will
/// type "fast apple", but so far all they've typed is
/// "fast a", again because the analyzer doesn't convey whether
/// it's seen a token separator after the "a",
/// will remove that "a" causing
/// far more matches than you'd expect.
- ///
+ ///
/// - Lookups with the empty string return no results
/// instead of all results.
///
- ///
+ ///
/// @lucene.experimental
///
///
@@ -122,19 +122,19 @@ public class AnalyzingSuggester : Lookup
///
/// Represents the separation between tokens, if
- /// was specified
+ /// was specified
///
private const int SEP_LABEL = '\u001F';
///
/// Marks end of the analyzed input and start of dedup
- /// byte.
+ /// byte.
///
private const int END_BYTE = 0x0;
///
/// Maximum number of dup surface forms (different surface
- /// forms for the same analyzed form).
+ /// forms for the same analyzed form).
///
private readonly int maxSurfaceFormsPerAnalyzedForm;
@@ -142,14 +142,14 @@ public class AnalyzingSuggester : Lookup
/// Maximum graph paths to index for a single analyzed
/// surface form. This only matters if your analyzer
/// makes lots of alternate paths (e.g. contains
- /// SynonymFilter).
+ /// SynonymFilter).
///
private readonly int maxGraphExpansions;
///
/// Highest number of analyzed paths we saw for any single
/// input surface form. For analyzers that never create
- /// graphs this will always be 1.
+ /// graphs this will always be 1.
///
private int maxAnalyzedPathsForOneInput;
@@ -307,7 +307,7 @@ private void ReplaceSep(Automaton a)
///
/// Used by subclass to change the lookup automaton, if
- /// necessary.
+ /// necessary.
///
protected internal virtual Automaton ConvertAutomaton(Automaton a)
{
@@ -969,7 +969,7 @@ internal ISet ToFiniteStrings(BytesRef surfaceForm, TokenStreamToAuto
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
ReplaceSep(automaton);
@@ -999,7 +999,7 @@ internal Automaton ToLookupAutomaton(string key)
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
// TODO: we could use the end offset to "guess"
@@ -1046,4 +1046,4 @@ private static int EncodeWeight(long value)
internal static readonly IComparer.Pair> weightComparer =
Comparer.Pair>.Create((left, right) => Comparer.Default.Compare(left.Output1, right.Output1));
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Suggest/Suggest/Analyzing/FreeTextSuggester.cs b/src/Lucene.Net.Suggest/Suggest/Analyzing/FreeTextSuggester.cs
index d16cb1dbde..6483c53540 100644
--- a/src/Lucene.Net.Suggest/Suggest/Analyzing/FreeTextSuggester.cs
+++ b/src/Lucene.Net.Suggest/Suggest/Analyzing/FreeTextSuggester.cs
@@ -763,7 +763,7 @@ public virtual IList DoLookup(string key, IEnumerable co
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
@@ -873,4 +873,4 @@ public virtual object Get(string key)
throw UnsupportedOperationException.Create();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.TestFramework/Analysis/BaseTokenStreamTestCase.cs b/src/Lucene.Net.TestFramework/Analysis/BaseTokenStreamTestCase.cs
index 6ea882ec7a..21f82c9591 100644
--- a/src/Lucene.Net.TestFramework/Analysis/BaseTokenStreamTestCase.cs
+++ b/src/Lucene.Net.TestFramework/Analysis/BaseTokenStreamTestCase.cs
@@ -124,7 +124,6 @@ public static void AssertTokenStreamContents(TokenStream ts, string[] output, in
// Added this try-finally block to fix this.
try
{
-
Assert.IsNotNull(output);
var checkClearAtt = ts.AddAttribute<ICheckClearAttributesAttribute>();
@@ -390,6 +389,7 @@ public static void AssertTokenStreamContents(TokenStream ts, string[] output, in
}
catch (Exception)
{
+ // TODO: #271 review this, not in upstream code
//ts.Reset();
ts.ClearAttributes();
ts.End();
@@ -397,7 +397,7 @@ public static void AssertTokenStreamContents(TokenStream ts, string[] output, in
}
finally
{
- ts.Dispose();
+ ts.Close();
}
}
@@ -556,7 +556,7 @@ internal static void CheckResetException(Analyzer a, string input)
{
}
ts.End();
- ts.Dispose();
+ ts.Close();
}
// check for a missing Close()
@@ -577,7 +577,7 @@ internal static void CheckResetException(Analyzer a, string input)
}
finally
{
- ts.Dispose();
+ ts.Close();
}
}
@@ -960,16 +960,21 @@ private static void CheckAnalysisConsistency(Random random, Analyzer a, bool use
int remainder = random.Next(10);
TextReader reader = new StringReader(text);
- TokenStream ts;
- using (ts = a.GetTokenStream("dummy", useCharFilter ? new MockCharFilter(reader, remainder) : reader))
+ TokenStream ts = a.GetTokenStream("dummy", useCharFilter ? new MockCharFilter(reader, remainder) : reader);
+
+ try
{
bool isReset = false;
try
{
termAtt = ts.HasAttribute<ICharTermAttribute>() ? ts.GetAttribute<ICharTermAttribute>() : null;
offsetAtt = ts.HasAttribute<IOffsetAttribute>() ? ts.GetAttribute<IOffsetAttribute>() : null;
- posIncAtt = ts.HasAttribute<IPositionIncrementAttribute>() ? ts.GetAttribute<IPositionIncrementAttribute>() : null;
- posLengthAtt = ts.HasAttribute<IPositionLengthAttribute>() ? ts.GetAttribute<IPositionLengthAttribute>() : null;
+ posIncAtt = ts.HasAttribute<IPositionIncrementAttribute>()
+ ? ts.GetAttribute<IPositionIncrementAttribute>()
+ : null;
+ posLengthAtt = ts.HasAttribute<IPositionLengthAttribute>()
+ ? ts.GetAttribute<IPositionLengthAttribute>()
+ : null;
typeAtt = ts.HasAttribute<ITypeAttribute>() ? ts.GetAttribute<ITypeAttribute>() : null;
ts.Reset();
@@ -984,34 +989,40 @@ private static void CheckAnalysisConsistency(Random random, Analyzer a, bool use
{
types.Add(typeAtt.Type);
}
+
if (posIncAtt != null)
{
positions.Add(posIncAtt.PositionIncrement);
}
+
if (posLengthAtt != null)
{
positionLengths.Add(posLengthAtt.PositionLength);
}
+
if (offsetAtt != null)
{
startOffsets.Add(offsetAtt.StartOffset);
endOffsets.Add(offsetAtt.EndOffset);
}
}
+
+ ts.End();
+
// LUCENENET: We are doing this in the finally block to ensure it happens
// when there are exeptions thrown (such as when the assert fails).
- //ts.End();
//ts.Dispose();
}
finally
{
+ // TODO: #271 - review this if block, not in upstream code
if (!isReset)
{
try
{
// consume correctly
ts.Reset();
- while (ts.IncrementToken());
+ while (ts.IncrementToken()) ;
//ts.End();
//ts.Dispose();
}
@@ -1022,9 +1033,12 @@ private static void CheckAnalysisConsistency(Random random, Analyzer a, bool use
// ignore
}
}
- ts.End(); // ts.end();
}
- } // ts.close();
+ }
+ finally
+ {
+ ts.Close();
+ }
// verify reusing is "reproducable" and also get the normal tokenstream sanity checks
if (tokens.Count > 0)
@@ -1080,7 +1094,7 @@ private static void CheckAnalysisConsistency(Random random, Analyzer a, bool use
}
finally
{
- ts.Dispose();
+ ts.Close();
}
}
else if (evilness == 7)
@@ -1112,7 +1126,7 @@ private static void CheckAnalysisConsistency(Random random, Analyzer a, bool use
}
finally
{
- ts.Dispose();
+ ts.Close();
}
}
}
@@ -1140,35 +1154,50 @@ private static void CheckAnalysisConsistency(Random random, Analyzer a, bool use
}
ts = a.GetTokenStream("dummy", useCharFilter ? (TextReader)new MockCharFilter(reader, remainder) : reader);
- if (typeAtt != null && posIncAtt != null && posLengthAtt != null && offsetAtt != null)
- {
- // offset + pos + posLength + type
- AssertTokenStreamContents(ts, tokens.ToArray(), ToIntArray(startOffsets), ToIntArray(endOffsets), types.ToArray(), ToIntArray(positions), ToIntArray(positionLengths), text.Length, offsetsAreCorrect);
- }
- else if (typeAtt != null && posIncAtt != null && offsetAtt != null)
- {
- // offset + pos + type
- AssertTokenStreamContents(ts, tokens.ToArray(), ToIntArray(startOffsets), ToIntArray(endOffsets), types.ToArray(), ToIntArray(positions), null, text.Length, offsetsAreCorrect);
- }
- else if (posIncAtt != null && posLengthAtt != null && offsetAtt != null)
- {
- // offset + pos + posLength
- AssertTokenStreamContents(ts, tokens.ToArray(), ToIntArray(startOffsets), ToIntArray(endOffsets), null, ToIntArray(positions), ToIntArray(positionLengths), text.Length, offsetsAreCorrect);
- }
- else if (posIncAtt != null && offsetAtt != null)
- {
- // offset + pos
- AssertTokenStreamContents(ts, tokens.ToArray(), ToIntArray(startOffsets), ToIntArray(endOffsets), null, ToIntArray(positions), null, text.Length, offsetsAreCorrect);
- }
- else if (offsetAtt != null)
+
+ try
{
- // offset
- AssertTokenStreamContents(ts, tokens.ToArray(), ToIntArray(startOffsets), ToIntArray(endOffsets), null, null, null, text.Length, offsetsAreCorrect);
+ if (typeAtt != null && posIncAtt != null && posLengthAtt != null && offsetAtt != null)
+ {
+ // offset + pos + posLength + type
+ AssertTokenStreamContents(ts, tokens.ToArray(), ToIntArray(startOffsets), ToIntArray(endOffsets),
+ types.ToArray(), ToIntArray(positions), ToIntArray(positionLengths), text.Length,
+ offsetsAreCorrect);
+ }
+ else if (typeAtt != null && posIncAtt != null && offsetAtt != null)
+ {
+ // offset + pos + type
+ AssertTokenStreamContents(ts, tokens.ToArray(), ToIntArray(startOffsets), ToIntArray(endOffsets),
+ types.ToArray(), ToIntArray(positions), null, text.Length, offsetsAreCorrect);
+ }
+ else if (posIncAtt != null && posLengthAtt != null && offsetAtt != null)
+ {
+ // offset + pos + posLength
+ AssertTokenStreamContents(ts, tokens.ToArray(), ToIntArray(startOffsets), ToIntArray(endOffsets),
+ null, ToIntArray(positions), ToIntArray(positionLengths), text.Length, offsetsAreCorrect);
+ }
+ else if (posIncAtt != null && offsetAtt != null)
+ {
+ // offset + pos
+ AssertTokenStreamContents(ts, tokens.ToArray(), ToIntArray(startOffsets), ToIntArray(endOffsets),
+ null, ToIntArray(positions), null, text.Length, offsetsAreCorrect);
+ }
+ else if (offsetAtt != null)
+ {
+ // offset
+ AssertTokenStreamContents(ts, tokens.ToArray(), ToIntArray(startOffsets), ToIntArray(endOffsets),
+ null, null, null, text.Length, offsetsAreCorrect);
+ }
+ else
+ {
+ // terms only
+ AssertTokenStreamContents(ts, tokens.ToArray());
+ }
}
- else
+ finally
{
- // terms only
- AssertTokenStreamContents(ts, tokens.ToArray());
+ ts.Close();
}
if (field != null)
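
For orientation, the consumer workflow these hunks standardize on (Reset, then IncrementToken, End only on success, Close unconditionally) looks like this. A minimal sketch, not part of the patch; the analyzer, field name, and text are placeholders:

    TokenStream ts = analyzer.GetTokenStream("field", new StringReader(text));
    try
    {
        ICharTermAttribute termAtt = ts.AddAttribute<ICharTermAttribute>();
        ts.Reset();                  // always Reset() before the first IncrementToken()
        while (ts.IncrementToken())
        {
            Console.WriteLine(termAtt.ToString());
        }
        ts.End();                    // End() runs only on the success path
    }
    finally
    {
        ts.Close();                  // Close() replaces Dispose() and runs unconditionally
    }
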
diff --git a/src/Lucene.Net.TestFramework/Analysis/CollationTestBase.cs b/src/Lucene.Net.TestFramework/Analysis/CollationTestBase.cs
index c7cb9be00f..d9071b30e0 100644
--- a/src/Lucene.Net.TestFramework/Analysis/CollationTestBase.cs
+++ b/src/Lucene.Net.TestFramework/Analysis/CollationTestBase.cs
@@ -44,7 +44,7 @@ public abstract class CollationTestBase : LuceneTestCase
// LUCENENET: Not all locales may be available for collation.
// LUCENENET: Removed this (only) reference to the ICU library, since it has a lot of data and we don't
- // want to unnecessarily reference it in all test projects.
+ // want to unnecessarily reference it in all test projects.
//protected readonly string[] availableCollationLocales = RuleBasedCollator.GetAvailableCollationLocales().ToArray();
///
@@ -154,13 +154,13 @@ public virtual void TestFarsiTermRangeQuery(Analyzer analyzer, BytesRef firstBeg
//
// TODO: this test is really fragile. there are already 3 different cases,
// depending upon unicode version.
- public virtual void TestCollationKeySort(Analyzer usAnalyzer,
- Analyzer franceAnalyzer,
- Analyzer swedenAnalyzer,
- Analyzer denmarkAnalyzer,
- string usResult,
- string frResult,
- string svResult,
+ public virtual void TestCollationKeySort(Analyzer usAnalyzer,
+ Analyzer franceAnalyzer,
+ Analyzer swedenAnalyzer,
+ Analyzer denmarkAnalyzer,
+ string usResult,
+ string frResult,
+ string svResult,
string dkResult)
{
using Directory indexStore = NewDirectory();
@@ -272,7 +272,7 @@ public virtual void AssertThreadSafe(Analyzer analyzer)
}
finally
{
- IOUtils.DisposeWhileHandlingException(priorException, ts);
+ IOUtils.CloseWhileHandlingException(priorException, ts);
}
}
@@ -329,7 +329,7 @@ public override void Run()
}
finally
{
- IOUtils.DisposeWhileHandlingException(priorException, ts);
+ IOUtils.CloseWhileHandlingException(priorException, ts);
}
}
}
@@ -340,4 +340,4 @@ public override void Run()
}
}
}
-}
\ No newline at end of file
+}
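
The CollationTestBase hunks above swap DisposeWhileHandlingException for IOUtils.CloseWhileHandlingException. A minimal sketch of the surrounding shape, assuming the same priorException convention the test uses; the consuming loop is illustrative:

    Exception priorException = null;
    TokenStream ts = analyzer.GetTokenStream("field", text);
    try
    {
        ts.Reset();
        while (ts.IncrementToken()) { /* consume */ }
        ts.End();
    }
    catch (Exception e)
    {
        priorException = e;
        throw;
    }
    finally
    {
        // Closes ts; a non-null priorException takes precedence over any
        // exception thrown by Close(), so the root cause is not masked.
        IOUtils.CloseWhileHandlingException(priorException, ts);
    }
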
diff --git a/src/Lucene.Net.TestFramework/Analysis/CrankyTokenFilter.cs b/src/Lucene.Net.TestFramework/Analysis/CrankyTokenFilter.cs
index d1c29d78e0..4a78c7c4a5 100644
--- a/src/Lucene.Net.TestFramework/Analysis/CrankyTokenFilter.cs
+++ b/src/Lucene.Net.TestFramework/Analysis/CrankyTokenFilter.cs
@@ -69,15 +69,12 @@ public override void Reset()
}
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- base.Dispose(disposing);
- if (disposing)
+ base.Close();
+ if (thingToDo == 3 && random.nextBoolean())
{
- if (thingToDo == 3 && random.nextBoolean())
- {
- throw new IOException("Fake IOException from TokenStream.Dispose(bool)");
- }
+ throw new IOException("Fake IOException from TokenStream.Close()");
}
}
}
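
CrankyTokenFilter above, and MockGraphTokenFilter below, converge on the same override shape as the PatternAnalyzer streams: chain to the base, then tear down local state. As a sketch, where someState stands in for whatever per-stream field the subclass owns:

    public override void Close()
    {
        base.Close();           // always chain to the base implementation first
        this.someState = null;  // then release this stream's own state
    }
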
diff --git a/src/Lucene.Net.TestFramework/Analysis/MockGraphTokenFilter.cs b/src/Lucene.Net.TestFramework/Analysis/MockGraphTokenFilter.cs
index e52443d75d..72aff070f7 100644
--- a/src/Lucene.Net.TestFramework/Analysis/MockGraphTokenFilter.cs
+++ b/src/Lucene.Net.TestFramework/Analysis/MockGraphTokenFilter.cs
@@ -111,13 +111,10 @@ public override void Reset()
this.random = new J2N.Randomizer(seed);
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- if (disposing)
- {
- base.Dispose(disposing);
- this.random = null;
- }
+ base.Close();
+ this.random = null;
}
public override bool IncrementToken()
@@ -133,4 +130,4 @@ public override bool IncrementToken()
return NextToken();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.TestFramework/Analysis/MockTokenizer.cs b/src/Lucene.Net.TestFramework/Analysis/MockTokenizer.cs
index 3b72dbbd95..865435fc06 100644
--- a/src/Lucene.Net.TestFramework/Analysis/MockTokenizer.cs
+++ b/src/Lucene.Net.TestFramework/Analysis/MockTokenizer.cs
@@ -305,22 +305,19 @@ public override void Reset()
streamState = State.RESET;
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- base.Dispose(disposing);
- if (disposing)
- {
- // in some exceptional cases (e.g. TestIndexWriterExceptions) a test can prematurely close()
- // these tests should disable this check, by default we check the normal workflow.
- // TODO: investigate the CachingTokenFilter "double-close"... for now we ignore this
- if (Debugging.AssertsEnabled) Debugging.Assert(!enableChecks || streamState == State.END || streamState == State.CLOSE,"Dispose() called in wrong state: {0}", streamState);
- streamState = State.CLOSE;
- }
+ base.Close();
+ // in some exceptional cases (e.g. TestIndexWriterExceptions) a test can prematurely close()
+ // these tests should disable this check, by default we check the normal workflow.
+ // TODO: investigate the CachingTokenFilter "double-close"... for now we ignore this
+ if (Debugging.AssertsEnabled) Debugging.Assert(!enableChecks || streamState == State.END || streamState == State.CLOSE, "Close() called in wrong state: {0}", streamState);
+ streamState = State.CLOSE;
}
internal override bool SetReaderTestPoint()
{
- if (Debugging.AssertsEnabled) Debugging.Assert(!enableChecks || streamState == State.CLOSE,"SetReader() called in wrong state: {0}", streamState);
+ if (Debugging.AssertsEnabled) Debugging.Assert(!enableChecks || streamState == State.CLOSE, "SetReader() called in wrong state: {0}", streamState);
streamState = State.SETREADER;
return true;
}
@@ -352,4 +349,4 @@ public virtual bool EnableChecks
set => enableChecks = value;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Cn/TestChineseTokenizer.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Cn/TestChineseTokenizer.cs
index fcbc1150ba..48166d6af1 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Cn/TestChineseTokenizer.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Cn/TestChineseTokenizer.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Analysis.Cn
* limitations under the License.
*/
- /// @deprecated Remove this test when ChineseAnalyzer is removed.
+ /// @deprecated Remove this test when ChineseAnalyzer is removed.
[Obsolete("Remove this test when ChineseAnalyzer is removed.")]
public class TestChineseTokenizer : BaseTokenStreamTestCase
{
@@ -47,7 +47,7 @@ public virtual void TestOtherLetterOffset()
correctEndOffset++;
}
tokenizer.End();
- tokenizer.Dispose();
+ tokenizer.Close();
}
[Test]
@@ -100,7 +100,7 @@ public virtual void TestNumerics()
/*
* ChineseTokenizer tokenizes english similar to SimpleAnalyzer.
* it will lowercase terms automatically.
- *
+ *
* ChineseFilter has an english stopword list, it also removes any single character tokens.
* the stopword list is case-sensitive.
*/
@@ -125,4 +125,4 @@ public virtual void TestRandomStrings()
CheckRandomData(Random, new ChineseAnalyzer(), 10000 * RandomMultiplier);
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Commongrams/CommonGramsFilterTest.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Commongrams/CommonGramsFilterTest.cs
index 449f6e4b3d..3bf82b8f76 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Commongrams/CommonGramsFilterTest.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Commongrams/CommonGramsFilterTest.cs
@@ -48,7 +48,7 @@ public virtual void TestReset()
assertEquals("the", term.ToString());
assertTrue(cgf.IncrementToken());
assertEquals("the_s", term.ToString());
- cgf.Dispose();
+ cgf.Close();
wt.SetReader(new StringReader(input));
cgf.Reset();
@@ -70,7 +70,7 @@ public virtual void TestQueryReset()
assertEquals("How_the", term.ToString());
assertTrue(nsf.IncrementToken());
assertEquals("the_s", term.ToString());
- nsf.Dispose();
+ nsf.Close();
wt.SetReader(new StringReader(input));
nsf.Reset();
@@ -86,7 +86,7 @@ public virtual void TestQueryReset()
/// tokens/positions in)
/// "foo bar the"=>"foo:1|bar:2,bar-the:2|the:3=> "foo" "bar-the" (2 tokens
/// out)
- ///
+ ///
///
[Test]
public virtual void TestCommonGramsQueryFilter()
@@ -282,4 +282,4 @@ public virtual void TestRandomStrings()
CheckRandomData(Random, b, 1000 * RandomMultiplier);
}
}
-}
\ No newline at end of file
+}
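
The TestReset and TestQueryReset hunks exercise the reuse contract: after Close(), the chain can be consumed again once the tokenizer gets a fresh reader. Sketched with the names from the test; commonWords stands in for the test's stop set:

    var wt = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
    var cgf = new CommonGramsFilter(TEST_VERSION_CURRENT, wt, commonWords);

    cgf.Reset();
    while (cgf.IncrementToken()) { /* first pass */ }
    cgf.Close();                            // was cgf.Dispose(); the chain stays usable

    wt.SetReader(new StringReader(input));  // point the tokenizer at fresh input
    cgf.Reset();
    while (cgf.IncrementToken()) { /* second pass over the same chain */ }
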
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Compound/TestCompoundWordTokenFilter.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Compound/TestCompoundWordTokenFilter.cs
index 251f762766..8ac4e49fa1 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Compound/TestCompoundWordTokenFilter.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Compound/TestCompoundWordTokenFilter.cs
@@ -147,7 +147,7 @@ public virtual void TestReset()
assertTrue(tf.IncrementToken());
assertEquals("Rind", termAtt.ToString());
tf.End();
- tf.Dispose();
+ tf.Close();
wsTokenizer.SetReader(new StringReader("Rindfleischüberwachungsgesetz"));
tf.Reset();
assertTrue(tf.IncrementToken());
@@ -289,4 +289,4 @@ public virtual void TestEmptyTerm()
CheckOneTerm(b, "", "");
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestBugInSomething.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestBugInSomething.cs
index 99404217e8..c9d47abbae 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestBugInSomething.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestBugInSomething.cs
@@ -57,7 +57,7 @@ public virtual void Test()
{
reader = new MockCharFilter(reader, 0);
reader = new MappingCharFilter(map, reader);
- return reader;
+ return reader;
});
CheckAnalysisConsistency(Random, a, false, "wmgddzunizdomqyj");
}
@@ -261,13 +261,10 @@ public override void End()
Console.WriteLine(m_input.GetType().Name + ".end()");
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- base.Dispose(disposing);
- if (disposing)
- {
- Console.WriteLine(m_input.GetType().Name + ".close()");
- }
+ base.Close();
+ Console.WriteLine(m_input.GetType().Name + ".close()");
}
public override void Reset()
@@ -311,4 +308,4 @@ public virtual void TestCuriousWikipediaString()
CheckAnalysisConsistency(Random, a, false, "B\u28c3\ue0f8[ \ud800\udfc2
jb");
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestDuelingAnalyzers.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestDuelingAnalyzers.cs
index 1eccc5369b..07cb1d1125 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestDuelingAnalyzers.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestDuelingAnalyzers.cs
@@ -28,9 +28,9 @@ namespace Lucene.Net.Analysis.Core
*/
///
- /// Compares MockTokenizer (which is simple with no optimizations) with equivalent
+ /// Compares MockTokenizer (which is simple with no optimizations) with equivalent
/// core tokenizers (that have optimizations like buffering).
- ///
+ ///
/// Any tests here need to probably consider unicode version of the JRE (it could
/// cause false fails).
///
@@ -195,8 +195,8 @@ public virtual void assertEquals(string s, TokenStream left, TokenStream right)
left.End();
right.End();
assertEquals("wrong final offset for input: " + s, leftOffset.EndOffset, rightOffset.EndOffset);
- left.Dispose();
- right.Dispose();
+ left.Close();
+ right.Close();
}
// TODO: maybe push this out to TestUtil or LuceneTestCase and always use it instead?
@@ -211,4 +211,4 @@ private static TextReader newStringReader(string s)
return r;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestKeywordAnalyzer.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestKeywordAnalyzer.cs
index 5403caef00..7461e32262 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestKeywordAnalyzer.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestKeywordAnalyzer.cs
@@ -110,7 +110,7 @@ public virtual void TestOffsets()
}
finally
{
- IOUtils.DisposeWhileHandlingException(stream);
+ IOUtils.CloseWhileHandlingException(stream);
}
}
@@ -122,4 +122,4 @@ public virtual void TestRandomStrings()
CheckRandomData(Random, new KeywordAnalyzer(), 1000 * RandomMultiplier);
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestRandomChains.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestRandomChains.cs
index 9840112734..c0a23f8694 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestRandomChains.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestRandomChains.cs
@@ -1162,7 +1162,7 @@ internal class CharFilterSpec
}
[Test]
- [AwaitsFix(BugUrl = "https://github.com/apache/lucenenet/issues/271#issuecomment-973005744")] // LUCENENET TODO: this test occasionally fails
+ //[AwaitsFix(BugUrl = "https://github.com/apache/lucenenet/issues/271#issuecomment-973005744")] // LUCENENET TODO: this test occasionally fails
public void TestRandomChains_()
{
int numIterations = AtLeast(20);
@@ -1189,7 +1189,7 @@ public void TestRandomChains_()
// we might regret this decision...
[Test]
- [AwaitsFix(BugUrl = "https://github.com/apache/lucenenet/issues/271#issuecomment-973005744")] // LUCENENET TODO: this test occasionally fails
+ //[AwaitsFix(BugUrl = "https://github.com/apache/lucenenet/issues/271#issuecomment-973005744")] // LUCENENET TODO: this test occasionally fails
public void TestRandomChainsWithLargeStrings()
{
int numIterations = AtLeast(20);
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestStopAnalyzer.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestStopAnalyzer.cs
index 8918530e8a..b89b57f68f 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestStopAnalyzer.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestStopAnalyzer.cs
@@ -61,7 +61,7 @@ public virtual void TestDefaults()
}
finally
{
- IOUtils.DisposeWhileHandlingException(stream);
+ IOUtils.CloseWhileHandlingException(stream);
}
}
@@ -86,7 +86,7 @@ public virtual void TestStopList()
}
finally
{
- IOUtils.DisposeWhileHandlingException(stream);
+ IOUtils.CloseWhileHandlingException(stream);
}
}
@@ -116,8 +116,8 @@ public virtual void TestStopListPositions()
}
finally
{
- IOUtils.DisposeWhileHandlingException(stream);
+ IOUtils.CloseWhileHandlingException(stream);
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestStopFilter.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestStopFilter.cs
index c010241176..a76d1d9824 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestStopFilter.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestStopFilter.cs
@@ -151,7 +151,7 @@ private void DoTestStopPositons(StopFilter stpf, bool enableIcrements)
}
assertFalse(stpf.IncrementToken());
stpf.End();
- stpf.Dispose();
+ stpf.Close();
}
// print debug info depending on VERBOSE
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestTypeTokenFilter.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestTypeTokenFilter.cs
index 722b0bf922..bf5a212537 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestTypeTokenFilter.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestTypeTokenFilter.cs
@@ -76,7 +76,7 @@ public virtual void TestStopPositons()
reader = new StringReader(sb.ToString());
typeTokenFilter =
#pragma warning disable 612, 618
- new TypeTokenFilter(LuceneVersion.LUCENE_43,
+ new TypeTokenFilter(LuceneVersion.LUCENE_43,
#pragma warning restore 612, 618
false, new StandardTokenizer(TEST_VERSION_CURRENT, reader), stopSet);
TestPositons(typeTokenFilter);
@@ -96,7 +96,7 @@ private void TestPositons(TypeTokenFilter stpf)
assertEquals("if position increment is enabled the positionIncrementAttribute value should be 3, otherwise 1", posIncrAtt.PositionIncrement, enablePositionIncrements ? 3 : 1);
}
stpf.End();
- stpf.Dispose();
+ stpf.Close();
}
[Test]
@@ -117,4 +117,4 @@ private static void log(string s)
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestCodepointCountFilter.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestCodepointCountFilter.cs
index 867694f5ea..2abfbed3cc 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestCodepointCountFilter.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestCodepointCountFilter.cs
@@ -67,7 +67,7 @@ public virtual void TestRandomStrings()
stream.Reset();
assertEquals(expected, stream.IncrementToken());
stream.End();
- stream.Dispose();
+ stream.Close();
}
}
@@ -82,4 +82,4 @@ public virtual void TestIllegalArguments()
});
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestEmptyTokenStream.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestEmptyTokenStream.cs
index 54b61378ee..f20f1fafc4 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestEmptyTokenStream.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestEmptyTokenStream.cs
@@ -33,12 +33,12 @@ public virtual void TestConsume()
ts.Reset();
assertFalse(ts.IncrementToken());
ts.End();
- ts.Dispose();
+ ts.Close();
// try again with reuse:
ts.Reset();
assertFalse(ts.IncrementToken());
ts.End();
- ts.Dispose();
+ ts.Close();
}
[Test]
@@ -69,4 +69,4 @@ public virtual void TestIndexWriter_LUCENE4656()
directory.Dispose();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestPerFieldAnalyzerWrapper.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestPerFieldAnalyzerWrapper.cs
index cad2302c86..18293905e9 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestPerFieldAnalyzerWrapper.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Miscellaneous/TestPerFieldAnalyzerWrapper.cs
@@ -53,7 +53,7 @@ public virtual void TestPerField()
}
finally
{
- IOUtils.DisposeWhileHandlingException(tokenStream);
+ IOUtils.CloseWhileHandlingException(tokenStream);
}
tokenStream = analyzer.GetTokenStream("special", text);
@@ -69,7 +69,7 @@ public virtual void TestPerField()
}
finally
{
- IOUtils.DisposeWhileHandlingException(tokenStream);
+ IOUtils.CloseWhileHandlingException(tokenStream);
}
}
@@ -104,4 +104,4 @@ public virtual void TestCharFilters()
AssertAnalyzesTo(p, "ab", new string[] { "aab" }, new int[] { 0 }, new int[] { 2 });
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Pattern/TestPatternTokenizer.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Pattern/TestPatternTokenizer.cs
index f7a0a939d6..8bf6edd494 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Pattern/TestPatternTokenizer.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Pattern/TestPatternTokenizer.cs
@@ -114,7 +114,7 @@ private static string tsToString(TokenStream @in)
termAtt.SetEmpty().Append("bogusTerm");
}
- @in.Dispose();
+ @in.Close();
return @out.ToString();
}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilterTest.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilterTest.cs
index b0c3f31ddc..8ac75254d6 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilterTest.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilterTest.cs
@@ -47,7 +47,7 @@ public virtual void TestPayloads()
AssertTermEquals("dogs", filter, termAtt, payAtt, "NN".getBytes(Encoding.UTF8));
assertFalse(filter.IncrementToken());
filter.End();
- filter.Dispose();
+ filter.Close();
}
[Test]
@@ -69,7 +69,7 @@ public virtual void TestNext()
AssertTermEquals("dogs", filter, "NN".getBytes(Encoding.UTF8));
assertFalse(filter.IncrementToken());
filter.End();
- filter.Dispose();
+ filter.Close();
}
@@ -93,7 +93,7 @@ public virtual void TestFloatEncoding()
AssertTermEquals("dogs", filter, termAtt, payAtt, PayloadHelper.EncodeSingle(83.7f));
assertFalse(filter.IncrementToken());
filter.End();
- filter.Dispose();
+ filter.Close();
}
[Test]
@@ -116,7 +116,7 @@ public virtual void TestIntEncoding()
AssertTermEquals("dogs", filter, termAtt, payAtt, PayloadHelper.EncodeInt32(83));
assertFalse(filter.IncrementToken());
filter.End();
- filter.Dispose();
+ filter.Close();
}
internal virtual void AssertTermEquals(string expected, TokenStream stream, byte[] expectPay)
@@ -162,4 +162,4 @@ internal virtual void AssertTermEquals(string expected, TokenStream stream, ICha
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Payloads/TestDelimitedPayloadTokenFilterFactory.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Payloads/TestDelimitedPayloadTokenFilterFactory.cs
index 003012ae4f..f48394c5b5 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Payloads/TestDelimitedPayloadTokenFilterFactory.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Payloads/TestDelimitedPayloadTokenFilterFactory.cs
@@ -45,7 +45,7 @@ public virtual void TestEncoder()
assertEquals(0.1f, payFloat, 0.0f);
}
stream.End();
- stream.Dispose();
+ stream.Close();
}
[Test]
@@ -65,7 +65,7 @@ public virtual void TestDelim()
assertEquals(0.1f, payFloat, 0.0f);
}
stream.End();
- stream.Dispose();
+ stream.Close();
}
///
@@ -84,4 +84,4 @@ public virtual void TestBogusArguments()
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapperTest.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapperTest.cs
index cd9fa26dd0..bca980687b 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapperTest.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapperTest.cs
@@ -110,7 +110,7 @@ public virtual void TestShingleAnalyzerWrapperPhraseQuery()
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
ScoreDoc[] hits = searcher.Search(q, null, 1000).ScoreDocs;
@@ -143,7 +143,7 @@ public virtual void TestShingleAnalyzerWrapperBooleanQuery()
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
ScoreDoc[] hits = searcher.Search(q, null, 1000).ScoreDocs;
@@ -237,4 +237,4 @@ public virtual void TestOutputUnigramsIfNoShinglesSingleToken()
AssertAnalyzesTo(analyzer, "please", new string[] { "please" }, new int[] { 0 }, new int[] { 6 }, new int[] { 1 });
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Synonym/TestSynonymMapFilter.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Synonym/TestSynonymMapFilter.cs
index 76b6b9b3d0..e6eb0f94c3 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Synonym/TestSynonymMapFilter.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Synonym/TestSynonymMapFilter.cs
@@ -150,7 +150,7 @@ private void Verify(string input, string output)
}
}
tokensOut.End();
- tokensOut.Dispose();
+ tokensOut.Close();
if (Verbose)
{
Console.WriteLine(" incr: END");
@@ -229,7 +229,7 @@ public virtual void TestBasic()
assertTrue(tokensIn.IncrementToken());
assertFalse(tokensIn.IncrementToken());
tokensIn.End();
- tokensIn.Dispose();
+ tokensIn.Close();
tokensOut = new SynonymFilter(tokensIn, b.Build(), true);
termAtt = tokensOut.AddAttribute<ICharTermAttribute>();
@@ -482,7 +482,7 @@ public virtual void TestRandom()
assertTrue(tokensIn.IncrementToken());
assertFalse(tokensIn.IncrementToken());
tokensIn.End();
- tokensIn.Dispose();
+ tokensIn.Close();
tokensOut = new SynonymFilter(tokensIn, b.Build(), true);
termAtt = tokensOut.AddAttribute<ICharTermAttribute>();
@@ -724,7 +724,7 @@ public virtual void TestBasic2()
assertTrue(tokensIn.IncrementToken());
assertFalse(tokensIn.IncrementToken());
tokensIn.End();
- tokensIn.Dispose();
+ tokensIn.Close();
tokensOut = new SynonymFilter(tokensIn, b.Build(), true);
termAtt = tokensOut.AddAttribute<ICharTermAttribute>();
@@ -861,7 +861,7 @@ public virtual void TestOutputHangsOffEnd()
assertTrue(tokensIn.IncrementToken());
assertFalse(tokensIn.IncrementToken());
tokensIn.End();
- tokensIn.Dispose();
+ tokensIn.Close();
tokensOut = new SynonymFilter(tokensIn, b.Build(), true);
termAtt = tokensOut.AddAttribute<ICharTermAttribute>();
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Th/TestThaiAnalyzer.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Th/TestThaiAnalyzer.cs
index 53c4b28ae5..50acb5283f 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Th/TestThaiAnalyzer.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Th/TestThaiAnalyzer.cs
@@ -35,7 +35,7 @@ namespace Lucene.Net.Analysis.Th
///
/// Test case for ThaiAnalyzer, modified from TestFrenchAnalyzer
- ///
+ ///
///
public class TestThaiAnalyzer : BaseTokenStreamTestCase
@@ -74,7 +74,7 @@ public virtual void TestBackwardsStopWords()
///
/// Thai numeric tokens were typed as ALPHANUM instead of NUM.
- /// @deprecated (3.1) testing backwards behavior
+ /// @deprecated (3.1) testing backwards behavior
[Test]
[Obsolete("(3.1) testing backwards behavior")]
public virtual void TestBuggyTokenType30()
@@ -82,7 +82,7 @@ public virtual void TestBuggyTokenType30()
AssertAnalyzesTo(new ThaiAnalyzer(LuceneVersion.LUCENE_30), "การที่ได้ต้องแสดงว่างานดี ๑๒๓", new string[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี", "๑๒๓" }, new string[] { "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>" });
}
- /// @deprecated (3.1) testing backwards behavior
+ /// @deprecated (3.1) testing backwards behavior
[Test]
[Obsolete("(3.1) testing backwards behavior")]
public virtual void TestAnalyzer30()
@@ -166,7 +166,7 @@ public virtual void TestReusableTokenStream()
AssertAnalyzesTo(analyzer, "บริษัทชื่อ XY&Z - คุยกับ xyz@demo.com", new string[] { "บริษัท", "ชื่อ", "xy", "z", "คุย", "กับ", "xyz", "demo.com" });
}
- /// @deprecated (3.1) for version back compat
+ /// @deprecated (3.1) for version back compat
[Test]
[Obsolete("(3.1) for version back compat")]
public virtual void TestReusableTokenStream30()
@@ -259,10 +259,10 @@ private static void AssertAnalyzer(Analyzer analyzer, string text)
JCG.List<int> startOffsets = new JCG.List<int>();
JCG.List<int> endOffsets = new JCG.List<int>();
- TokenStream ts;
TextReader reader = new StringReader(text);
+ TokenStream ts = analyzer.GetTokenStream("dummy", reader);
- using (ts = analyzer.GetTokenStream("dummy", reader))
+ try
{
bool isReset = false;
try
@@ -303,12 +303,19 @@ private static void AssertAnalyzer(Analyzer analyzer, string text)
// ignore
}
}
- ts.End(); // ts.end();
+
+ ts.End();
}
- } // ts.Dispose()
+ }
+ finally
+ {
+ ts.Close();
+ }
reader = new StringReader(text);
- using (ts = analyzer.GetTokenStream("dummy", reader))
+ ts = analyzer.GetTokenStream("dummy", reader);
+
+ try
{
bool isReset = false;
try
@@ -346,11 +353,14 @@ private static void AssertAnalyzer(Analyzer analyzer, string text)
// ignore
}
}
- ts.End(); // ts.end();
+ ts.End();
}
}
-
- } // ts.Dispose()
+ finally
+ {
+ ts.Close();
+ }
+ }
///
@@ -364,7 +374,7 @@ public virtual void TestRandomStrings()
///
/// blast some random large strings through the analyzer
- ///
+ ///
[Test]
[AwaitsFix(BugUrl = "https://github.com/apache/lucenenet/issues/269")] // LUCENENET TODO: this test occasionally fails
public virtual void TestRandomHugeStrings()
@@ -383,7 +393,7 @@ public virtual void TestAttributeReuse()
// just consume
TokenStream ts = analyzer.GetTokenStream("dummy", "ภาษาไทย");
AssertTokenStreamContents(ts, new string[] { "ภาษา", "ไทย" });
- // this consumer adds flagsAtt, which this analyzer does not use.
+ // this consumer adds flagsAtt, which this analyzer does not use.
ts = analyzer.GetTokenStream("dummy", "ภาษาไทย");
ts.AddAttribute<IFlagsAttribute>();
AssertTokenStreamContents(ts, new string[] { "ภาษา", "ไทย" });
@@ -406,4 +416,4 @@ public virtual void TestNumeralBreaking()
}
}
}
-#endif
\ No newline at end of file
+#endif
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Util/TestCharTokenizers.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Util/TestCharTokenizers.cs
index 7a8b011452..3a0540ae86 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Util/TestCharTokenizers.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Util/TestCharTokenizers.cs
@@ -39,7 +39,7 @@ public class TestCharTokenizers : BaseTokenStreamTestCase
{
/*
- * test to read surrogate pairs without loosing the pairing
+ * test to read surrogate pairs without losing the pairing
* if the surrogate pair is at the border of the internal IO buffer
*/
[Test]
@@ -145,7 +145,7 @@ public virtual void TestCrossPlaneNormalization()
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
// just for fun
@@ -208,7 +208,7 @@ public virtual void TestCrossPlaneNormalization2()
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
// just for fun
@@ -290,4 +290,4 @@ protected override bool IsTokenChar(int c)
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Util/TestElision.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Util/TestElision.cs
index 731c355d9f..46f6ce928d 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Util/TestElision.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Util/TestElision.cs
@@ -53,7 +53,7 @@ private IList Filter(TokenFilter filter)
tas.Add(termAtt.ToString());
}
filter.End();
- filter.Dispose();
+ filter.Close();
return tas;
}
@@ -68,4 +68,4 @@ public virtual void TestEmptyTerm()
CheckOneTerm(a, "", "");
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerTest.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerTest.cs
index f04f8b7e62..5f38f4bc0a 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerTest.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerTest.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.Wikipedia
///
/// Basic Tests for
- ///
+ ///
///
public class WikipediaTokenizerTest : BaseTokenStreamTestCase
{
@@ -107,7 +107,7 @@ public virtual void TestBoth()
assertEquals("flags " + i, expectedFlags[i], flagsAtt.Flags);
}
assertFalse(tf.IncrementToken());
- tf.Dispose();
+ tf.Close();
}
///
@@ -137,4 +137,4 @@ public virtual void TestRandomHugeStrings()
CheckRandomData(random, a, 100 * RandomMultiplier, 8192);
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Tests.Analysis.ICU/Analysis/Icu/Segmentation/TestICUTokenizer.cs b/src/Lucene.Net.Tests.Analysis.ICU/Analysis/Icu/Segmentation/TestICUTokenizer.cs
index 5a6fbfbfef..b034c00ad6 100644
--- a/src/Lucene.Net.Tests.Analysis.ICU/Analysis/Icu/Segmentation/TestICUTokenizer.cs
+++ b/src/Lucene.Net.Tests.Analysis.ICU/Analysis/Icu/Segmentation/TestICUTokenizer.cs
@@ -157,7 +157,7 @@ public void TestLao()
}
[Test]
- public void TestMyanmar()
+ public void TestMyanmar()
{
AssertAnalyzesTo(a, "သက်ဝင်လှုပ်ရှားစေပြီး", new String[] { "သက်ဝင်", "လှုပ်ရှား", "စေ", "ပြီး" });
}
@@ -339,17 +339,25 @@ public void TestRandomHugeStrings()
[Test]
public void TestTokenAttributes()
{
- using TokenStream ts = a.GetTokenStream("dummy", "This is a test");
- IScriptAttribute scriptAtt = ts.AddAttribute<IScriptAttribute>();
- ts.Reset();
- while (ts.IncrementToken())
+ TokenStream ts = a.GetTokenStream("dummy", "This is a test");
+ try
{
- assertEquals(UScript.Latin, scriptAtt.Code);
- assertEquals(UScript.GetName(UScript.Latin), scriptAtt.GetName());
- assertEquals(UScript.GetShortName(UScript.Latin), scriptAtt.GetShortName());
- assertTrue(ts.ReflectAsString(false).Contains("script=Latin"));
+ IScriptAttribute scriptAtt = ts.AddAttribute<IScriptAttribute>();
+ ts.Reset();
+ while (ts.IncrementToken())
+ {
+ assertEquals(UScript.Latin, scriptAtt.Code);
+ assertEquals(UScript.GetName(UScript.Latin), scriptAtt.GetName());
+ assertEquals(UScript.GetShortName(UScript.Latin), scriptAtt.GetShortName());
+ assertTrue(ts.ReflectAsString(false).Contains("script=Latin"));
+ }
+
+ ts.End();
+ }
+ finally
+ {
+ IOUtils.CloseWhileHandlingException(ts);
}
- ts.End();
}
private sealed class ThreadAnonymousClass : ThreadJob
@@ -414,4 +422,4 @@ public void TestICUConcurrency()
}
}
}
-}
\ No newline at end of file
+}
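
TestTokenAttributes drops the using declaration because cleanup now goes through Close() rather than Dispose(), and the test prefers a cleanup failure to be suppressed rather than mask an assertion failure. The reduced shape, as a sketch with the per-token assertions elided:

    TokenStream ts = a.GetTokenStream("dummy", "This is a test");
    try
    {
        IScriptAttribute scriptAtt = ts.AddAttribute<IScriptAttribute>();
        ts.Reset();
        while (ts.IncrementToken())
        {
            // per-token assertions on scriptAtt go here
        }
        ts.End();
    }
    finally
    {
        IOUtils.CloseWhileHandlingException(ts); // swallow any Close() failure
    }
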
diff --git a/src/Lucene.Net.Tests.Analysis.ICU/Collation/TestICUCollationKeyFilterFactory.cs b/src/Lucene.Net.Tests.Analysis.ICU/Collation/TestICUCollationKeyFilterFactory.cs
index 052cc8cac5..a911c4dbe3 100644
--- a/src/Lucene.Net.Tests.Analysis.ICU/Collation/TestICUCollationKeyFilterFactory.cs
+++ b/src/Lucene.Net.Tests.Analysis.ICU/Collation/TestICUCollationKeyFilterFactory.cs
@@ -46,8 +46,8 @@ public class TestICUCollationKeyFilterFactory : BaseTokenStreamTestCase
[Test]
public void TestBasicUsage()
{
- String turkishUpperCase = "I WİLL USE TURKİSH CASING";
- String turkishLowerCase = "ı will use turkish casıng";
+ string turkishUpperCase = "I WİLL USE TURKİSH CASING";
+ string turkishLowerCase = "ı will use turkish casıng";
TokenFilterFactory factory = tokenFilterFactory("ICUCollationKey",
"locale", "tr",
"strength", "primary");
@@ -64,8 +64,8 @@ public void TestBasicUsage()
[Test]
public void TestNormalization()
{
- String turkishUpperCase = "I W\u0049\u0307LL USE TURKİSH CASING";
- String turkishLowerCase = "ı will use turkish casıng";
+ string turkishUpperCase = "I W\u0049\u0307LL USE TURKİSH CASING";
+ string turkishLowerCase = "ı will use turkish casıng";
TokenFilterFactory factory = tokenFilterFactory("ICUCollationKey",
"locale", "tr",
"strength", "primary",
@@ -83,8 +83,8 @@ public void TestNormalization()
[Test]
public void TestSecondaryStrength()
{
- String upperCase = "TESTING";
- String lowerCase = "testing";
+ string upperCase = "TESTING";
+ string lowerCase = "testing";
TokenFilterFactory factory = tokenFilterFactory("ICUCollationKey",
"locale", "en",
"strength", "secondary",
@@ -98,13 +98,13 @@ public void TestSecondaryStrength()
/*
* Setting alternate=shifted to shift whitespace, punctuation and symbols
- * to quaternary level
+ * to quaternary level
*/
[Test]
public void TestIgnorePunctuation()
{
- String withPunctuation = "foo-bar";
- String withoutPunctuation = "foo bar";
+ string withPunctuation = "foo-bar";
+ string withoutPunctuation = "foo bar";
TokenFilterFactory factory = tokenFilterFactory("ICUCollationKey",
"locale", "en",
"strength", "primary",
@@ -117,15 +117,15 @@ public void TestIgnorePunctuation()
}
/*
- * Setting alternate=shifted and variableTop to shift whitespace, but not
- * punctuation or symbols, to quaternary level
+ * Setting alternate=shifted and variableTop to shift whitespace, but not
+ * punctuation or symbols, to quaternary level
*/
[Test]
public void TestIgnoreWhitespace()
{
- String withSpace = "foo bar";
- String withoutSpace = "foobar";
- String withPunctuation = "foo-bar";
+ string withSpace = "foo bar";
+ string withoutSpace = "foobar";
+ string withPunctuation = "foo-bar";
TokenFilterFactory factory = tokenFilterFactory("ICUCollationKey",
"locale", "en",
"strength", "primary",
@@ -151,8 +151,8 @@ public void TestIgnoreWhitespace()
[Test]
public void TestNumerics()
{
- String nine = "foobar-9";
- String ten = "foobar-10";
+ string nine = "foobar-9";
+ string ten = "foobar-10";
TokenFilterFactory factory = tokenFilterFactory("ICUCollationKey",
"locale", "en",
"numeric", "true");
@@ -170,10 +170,10 @@ public void TestNumerics()
[Test]
public void TestIgnoreAccentsButNotCase()
{
- String withAccents = "résumé";
- String withoutAccents = "resume";
- String withAccentsUpperCase = "Résumé";
- String withoutAccentsUpperCase = "Resume";
+ string withAccents = "résumé";
+ string withoutAccents = "resume";
+ string withAccentsUpperCase = "Résumé";
+ string withoutAccentsUpperCase = "Resume";
TokenFilterFactory factory = tokenFilterFactory("ICUCollationKey",
"locale", "en",
"strength", "primary",
@@ -205,8 +205,8 @@ public void TestIgnoreAccentsButNotCase()
[Test]
public void TestUpperCaseFirst()
{
- String lower = "resume";
- String upper = "Resume";
+ string lower = "resume";
+ string upper = "Resume";
TokenFilterFactory factory = tokenFilterFactory("ICUCollationKey",
"locale", "en",
"strength", "tertiary",
@@ -230,7 +230,7 @@ public void TestCustomRules()
{
RuleBasedCollator baseCollator = (RuleBasedCollator)Collator.GetInstance(new UCultureInfo("de_DE"));
- String DIN5007_2_tailorings =
+ string DIN5007_2_tailorings =
"& ae , a\u0308 & AE , A\u0308" +
"& oe , o\u0308 & OE , O\u0308" +
"& ue , u\u0308 & UE , u\u0308";
@@ -238,12 +238,12 @@ public void TestCustomRules()
RuleBasedCollator tailoredCollator = new RuleBasedCollator(baseCollator.GetRules() + DIN5007_2_tailorings);
string tailoredRules = tailoredCollator.GetRules();
//
- // at this point, you would save these tailoredRules to a file,
+ // at this point, you would save these tailoredRules to a file,
// and use the custom parameter.
//
- String germanUmlaut = "Töne";
- String germanOE = "Toene";
- IDictionary<String, String> args = new Dictionary<String, String>();
+ string germanUmlaut = "Töne";
+ string germanOE = "Toene";
+ IDictionary<string, string> args = new Dictionary<string, string>();
args["custom"] = "rules.txt";
args["strength"] = "primary";
ICUCollationKeyFilterFactory factory = new ICUCollationKeyFilterFactory(args);
@@ -276,49 +276,49 @@ private void assertCollation(TokenStream stream1, TokenStream stream2, int compa
assertFalse(stream2.IncrementToken());
stream1.End();
stream2.End();
- stream1.Dispose();
- stream2.Dispose();
+ stream1.Close();
+ stream2.Close();
}
private class StringMockResourceLoader : IResourceLoader
{
- String text;
+ string text;
- internal StringMockResourceLoader(String text)
+ internal StringMockResourceLoader(string text)
{
this.text = text;
}
- public T NewInstance<T>(String cname)
+ public T NewInstance<T>(string cname)
{
return default;
}
- public Type FindType(String cname)
+ public Type FindType(string cname)
{
return null;
}
- public Stream OpenResource(String resource)
+ public Stream OpenResource(string resource)
{
return new MemoryStream(Encoding.UTF8.GetBytes(text));
}
}
- private TokenFilterFactory tokenFilterFactory(String name, params String[] keysAndValues)
+ private TokenFilterFactory tokenFilterFactory(string name, params string[] keysAndValues)
{
Type clazz = TokenFilterFactory.LookupClass(name);
if (keysAndValues.Length % 2 == 1)
{
throw new ArgumentException("invalid keysAndValues map");
}
- IDictionary<String, String> args = new Dictionary<String, String>();
+ IDictionary<string, string> args = new Dictionary<string, string>();
for (int i = 0; i < keysAndValues.Length; i += 2)
{
- String prev = args.Put(keysAndValues[i], keysAndValues[i + 1]);
+ string prev = args.Put(keysAndValues[i], keysAndValues[i + 1]);
assertNull("duplicate values for key: " + keysAndValues[i], prev);
}
- String previous = args.Put("luceneMatchVersion", TEST_VERSION_CURRENT.ToString());
+ string previous = args.Put("luceneMatchVersion", TEST_VERSION_CURRENT.ToString());
assertNull("duplicate values for key: luceneMatchVersion", previous);
TokenFilterFactory factory = null;
try
diff --git a/src/Lucene.Net.Tests.Analysis.Kuromoji/TestExtendedMode.cs b/src/Lucene.Net.Tests.Analysis.Kuromoji/TestExtendedMode.cs
index 4330ec923d..c36fd9e8af 100644
--- a/src/Lucene.Net.Tests.Analysis.Kuromoji/TestExtendedMode.cs
+++ b/src/Lucene.Net.Tests.Analysis.Kuromoji/TestExtendedMode.cs
@@ -60,7 +60,7 @@ public void TestSurrogates2()
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
}
diff --git a/src/Lucene.Net.Tests.Analysis.Kuromoji/TestJapaneseTokenizer.cs b/src/Lucene.Net.Tests.Analysis.Kuromoji/TestJapaneseTokenizer.cs
index f649a03d13..9ebf88597a 100644
--- a/src/Lucene.Net.Tests.Analysis.Kuromoji/TestJapaneseTokenizer.cs
+++ b/src/Lucene.Net.Tests.Analysis.Kuromoji/TestJapaneseTokenizer.cs
@@ -94,7 +94,7 @@ public void TestNormalMode()
{
AssertAnalyzesTo(analyzerNormal,
"シニアソフトウェアエンジニア",
- new String[] { "シニアソフトウェアエンジニア" });
+ new string[] { "シニアソフトウェアエンジニア" });
}
[Test]
@@ -102,7 +102,7 @@ public void TestDecomposition1()
{
AssertAnalyzesTo(analyzerNoPunct, "本来は、貧困層の女性や子供に医療保護を提供するために創設された制度である、" +
"アメリカ低所得者医療援助制度が、今日では、その予算の約3分の1を老人に費やしている。",
- new String[] { "本来", "は", "貧困", "層", "の", "女性", "や", "子供", "に", "医療", "保護", "を",
+ new string[] { "本来", "は", "貧困", "層", "の", "女性", "や", "子供", "に", "医療", "保護", "を",
"提供", "する", "ため", "に", "創設", "さ", "れ", "た", "制度", "で", "ある", "アメリカ",
"低", "所得", "者", "医療", "援助", "制度", "が", "今日", "で", "は", "その",
"予算", "の", "約", "3", "分の", "1", "を", "老人", "に", "費やし", "て", "いる" },
@@ -119,7 +119,7 @@ public void TestDecomposition1()
public void TestDecomposition2()
{
AssertAnalyzesTo(analyzerNoPunct, "麻薬の密売は根こそぎ絶やさなければならない",
- new String[] { "麻薬", "の", "密売", "は", "根こそぎ", "絶やさ", "なけれ", "ば", "なら", "ない" },
+ new string[] { "麻薬", "の", "密売", "は", "根こそぎ", "絶やさ", "なけれ", "ば", "なら", "ない" },
new int[] { 0, 2, 3, 5, 6, 10, 13, 16, 17, 19 },
new int[] { 2, 3, 5, 6, 10, 13, 16, 17, 19, 21 }
);
@@ -129,7 +129,7 @@ public void TestDecomposition2()
public void TestDecomposition3()
{
AssertAnalyzesTo(analyzerNoPunct, "魔女狩大将マシュー・ホプキンス。",
- new String[] { "魔女", "狩", "大将", "マシュー", "ホプキンス" },
+ new string[] { "魔女", "狩", "大将", "マシュー", "ホプキンス" },
new int[] { 0, 2, 3, 5, 10 },
new int[] { 2, 3, 5, 9, 15 }
);
@@ -139,7 +139,7 @@ public void TestDecomposition3()
public void TestDecomposition4()
{
AssertAnalyzesTo(analyzer, "これは本ではない",
- new String[] { "これ", "は", "本", "で", "は", "ない" },
+ new string[] { "これ", "は", "本", "で", "は", "ない" },
new int[] { 0, 2, 3, 4, 5, 6 },
new int[] { 2, 3, 4, 5, 6, 8 }
);
@@ -163,7 +163,7 @@ public void TestDecomposition5()
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
@@ -195,7 +195,7 @@ public void TestTwoSentences()
*/
AssertAnalyzesTo(analyzerNoPunct, "魔女狩大将マシュー・ホプキンス。 魔女狩大将マシュー・ホプキンス。",
- new String[] { "魔女", "狩", "大将", "マシュー", "ホプキンス", "魔女", "狩", "大将", "マシュー", "ホプキンス" },
+ new string[] { "魔女", "狩", "大将", "マシュー", "ホプキンス", "魔女", "狩", "大将", "マシュー", "ホプキンス" },
new int[] { 0, 2, 3, 5, 10, 17, 19, 20, 22, 27 },
new int[] { 2, 3, 5, 9, 15, 19, 20, 22, 26, 32 }
);
@@ -239,7 +239,7 @@ public void TestLargeDocReliability()
{
for (int i = 0; i < 100; i++)
{
- String s = TestUtil.RandomUnicodeString(Random, 10000);
+ string s = TestUtil.RandomUnicodeString(Random, 10000);
TokenStream ts = analyzer.GetTokenStream("foo", s);
try
{
@@ -251,7 +251,7 @@ public void TestLargeDocReliability()
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
}
@@ -261,7 +261,7 @@ public void TestLargeDocReliability()
public void TestSurrogates()
{
AssertAnalyzesTo(analyzer, "𩬅艱鍟䇹愯瀛",
- new String[] { "𩬅", "艱", "鍟", "䇹", "愯", "瀛" });
+ new string[] { "𩬅", "艱", "鍟", "䇹", "愯", "瀛" });
}
/** random test ensuring we don't ever split supplementaries */
@@ -275,7 +275,7 @@ public void TestSurrogates2()
{
Console.WriteLine("\nTEST: iter=" + i);
}
- String s = TestUtil.RandomUnicodeString(Random, 100);
+ string s = TestUtil.RandomUnicodeString(Random, 100);
TokenStream ts = analyzer.GetTokenStream("foo", s);
try
{
@@ -289,7 +289,7 @@ public void TestSurrogates2()
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
}
@@ -306,7 +306,7 @@ public void TestOnlyPunctuation()
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
@@ -322,7 +322,7 @@ public void TestOnlyPunctuationExtended()
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
@@ -332,14 +332,14 @@ public void TestOnlyPunctuationExtended()
public void TestEnd()
{
AssertTokenStreamContents(analyzerNoPunct.GetTokenStream("foo", "これは本ではない"),
- new String[] { "これ", "は", "本", "で", "は", "ない" },
+ new string[] { "これ", "は", "本", "で", "は", "ない" },
new int[] { 0, 2, 3, 4, 5, 6 },
new int[] { 2, 3, 4, 5, 6, 8 },
new int?(8)
);
AssertTokenStreamContents(analyzerNoPunct.GetTokenStream("foo", "これは本ではない "),
- new String[] { "これ", "は", "本", "で", "は", "ない" },
+ new string[] { "これ", "は", "本", "で", "は", "ない" },
new int[] { 0, 2, 3, 4, 5, 6, 8 },
new int[] { 2, 3, 4, 5, 6, 8, 9 },
new int?(12)
@@ -352,7 +352,7 @@ public void TestUserDict()
// Not a great test because w/o userdict.txt the
// segmentation is the same:
AssertTokenStreamContents(analyzer.GetTokenStream("foo", "関西国際空港に行った"),
- new String[] { "関西", "国際", "空港", "に", "行っ", "た" },
+ new string[] { "関西", "国際", "空港", "に", "行っ", "た" },
new int[] { 0, 2, 4, 6, 7, 9 },
new int[] { 2, 4, 6, 7, 9, 10 },
new int?(10)
@@ -364,7 +364,7 @@ public void TestUserDict2()
{
// Better test: w/o userdict the segmentation is different:
AssertTokenStreamContents(analyzer.GetTokenStream("foo", "朝青龍"),
- new String[] { "朝青龍" },
+ new string[] { "朝青龍" },
new int[] { 0 },
new int[] { 3 },
new int?(3)
@@ -376,7 +376,7 @@ public void TestUserDict3()
{
// Test entry that breaks into multiple tokens:
AssertTokenStreamContents(analyzer.GetTokenStream("foo", "abcd"),
- new String[] { "a", "b", "cd" },
+ new string[] { "a", "b", "cd" },
new int[] { 0, 1, 2 },
new int[] { 1, 2, 4 },
new int?(4)
@@ -410,8 +410,8 @@ public void TestSegmentation()
// "スペース", "ステーション", "に", "行き", "ます", "。",
// "うたがわしい", "。"
// };
- String input = "スペースステーションに行きます。うたがわしい。";
- String[]
+ string input = "スペースステーションに行きます。うたがわしい。";
+ string[]
surfaceForms = {
"スペース", "ステーション", "に", "行き", "ます", "。",
"うたがわしい", "。"
@@ -435,8 +435,8 @@ public void TestLatticeToDot()
});
- String input = "スペースステーションに行きます。うたがわしい。";
- String[] surfaceForms = {
+ string input = "スペースステーションに行きます。うたがわしい。";
+ string[] surfaceForms = {
"スペース", "ステーション", "に", "行き", "ます", "。",
"うたがわしい", "。"
};
@@ -448,14 +448,14 @@ public void TestLatticeToDot()
assertTrue(gv2.Finish().IndexOf("22.0", StringComparison.Ordinal) != -1);
}
- private void assertReadings(String input, params String[] readings)
+ private void assertReadings(string input, params string[] readings)
{
TokenStream ts = analyzer.GetTokenStream("ignored", input);
try
{
IReadingAttribute readingAtt = ts.AddAttribute<IReadingAttribute>();
ts.Reset();
- foreach (String reading in readings)
+ foreach (string reading in readings)
{
assertTrue(ts.IncrementToken());
assertEquals(reading, readingAtt.GetReading());
@@ -465,18 +465,18 @@ private void assertReadings(String input, params String[] readings)
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
- private void assertPronunciations(String input, params String[] pronunciations)
+ private void assertPronunciations(string input, params string[] pronunciations)
{
TokenStream ts = analyzer.GetTokenStream("ignored", input);
try
{
IReadingAttribute readingAtt = ts.AddAttribute<IReadingAttribute>();
ts.Reset();
- foreach (String pronunciation in pronunciations)
+ foreach (string pronunciation in pronunciations)
{
assertTrue(ts.IncrementToken());
assertEquals(pronunciation, readingAtt.GetPronunciation());
@@ -486,18 +486,18 @@ private void assertPronunciations(String input, params String[] pronunciations)
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
- private void assertBaseForms(String input, params String[] baseForms)
+ private void assertBaseForms(string input, params string[] baseForms)
{
TokenStream ts = analyzer.GetTokenStream("ignored", input);
try
{
IBaseFormAttribute baseFormAtt = ts.AddAttribute<IBaseFormAttribute>();
ts.Reset();
- foreach (String baseForm in baseForms)
+ foreach (string baseForm in baseForms)
{
assertTrue(ts.IncrementToken());
assertEquals(baseForm, baseFormAtt.GetBaseForm());
@@ -507,18 +507,18 @@ private void assertBaseForms(String input, params String[] baseForms)
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
- private void assertInflectionTypes(String input, params String[] inflectionTypes)
+ private void assertInflectionTypes(string input, params string[] inflectionTypes)
{
TokenStream ts = analyzer.GetTokenStream("ignored", input);
try
{
IInflectionAttribute inflectionAtt = ts.AddAttribute<IInflectionAttribute>();
ts.Reset();
- foreach (String inflectionType in inflectionTypes)
+ foreach (string inflectionType in inflectionTypes)
{
assertTrue(ts.IncrementToken());
assertEquals(inflectionType, inflectionAtt.GetInflectionType());
@@ -528,18 +528,18 @@ private void assertInflectionTypes(String input, params String[] inflectionTypes
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
- private void assertInflectionForms(String input, params String[] inflectionForms)
+ private void assertInflectionForms(string input, params string[] inflectionForms)
{
TokenStream ts = analyzer.GetTokenStream("ignored", input);
try
{
IInflectionAttribute inflectionAtt = ts.AddAttribute<IInflectionAttribute>();
ts.Reset();
- foreach (String inflectionForm in inflectionForms)
+ foreach (string inflectionForm in inflectionForms)
{
assertTrue(ts.IncrementToken());
assertEquals(inflectionForm, inflectionAtt.GetInflectionForm());
@@ -549,18 +549,18 @@ private void assertInflectionForms(String input, params String[] inflectionForms
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
- private void assertPartsOfSpeech(String input, params String[] partsOfSpeech)
+ private void assertPartsOfSpeech(string input, params string[] partsOfSpeech)
{
TokenStream ts = analyzer.GetTokenStream("ignored", input);
try
{
IPartOfSpeechAttribute partOfSpeechAtt = ts.AddAttribute<IPartOfSpeechAttribute>();
ts.Reset();
- foreach (String partOfSpeech in partsOfSpeech)
+ foreach (string partOfSpeech in partsOfSpeech)
{
assertTrue(ts.IncrementToken());
assertEquals(partOfSpeech, partOfSpeechAtt.GetPartOfSpeech());
@@ -570,7 +570,7 @@ private void assertPartsOfSpeech(String input, params String[] partsOfSpeech)
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
@@ -695,14 +695,14 @@ public void TestPartOfSpeech()
public void TestYabottai()
{
AssertAnalyzesTo(analyzer, "やぼったい",
- new String[] { "やぼったい" });
+ new string[] { "やぼったい" });
}
[Test]
public void TestTsukitosha()
{
AssertAnalyzesTo(analyzer, "突き通しゃ",
- new String[] { "突き通しゃ" });
+ new string[] { "突き通しゃ" });
}
[Test]
@@ -770,7 +770,7 @@ private void doTestBocchan(int numIterations)
{
TextReader reader = new StreamReader(
this.GetType().getResourceAsStream("bocchan.utf-8"), Encoding.UTF8);
- String line = reader.ReadLine();
+ string line = reader.ReadLine();
reader.Dispose();
if (Verbose)
@@ -801,10 +801,10 @@ private void doTestBocchan(int numIterations)
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
- String[] sentences = Regex.Split(line, "、|。").TrimEnd();
+ string[] sentences = Regex.Split(line, "、|。").TrimEnd();
if (Verbose)
{
Console.WriteLine("Total time : " + ((Time.NanoTime() / Time.MillisecondsPerNanosecond) - totalStart)); // LUCENENET: Use NanoTime() rather than CurrentTimeMilliseconds() for more accurate/reliable results
@@ -813,7 +813,7 @@ private void doTestBocchan(int numIterations)
totalStart = Time.NanoTime() / Time.MillisecondsPerNanosecond; // LUCENENET: Use NanoTime() rather than CurrentTimeMilliseconds() for more accurate/reliable results
for (int i = 0; i < numIterations; i++)
{
- foreach (String sentence in sentences)
+ foreach (string sentence in sentences)
{
TokenStream ts = analyzer.GetTokenStream("ignored", sentence);
try
@@ -824,7 +824,7 @@ private void doTestBocchan(int numIterations)
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
}
@@ -838,7 +838,7 @@ private void doTestBocchan(int numIterations)
public void TestWithPunctuation()
{
AssertAnalyzesTo(analyzerNoPunct, "羽田。空港",
- new String[] { "羽田", "空港" },
+ new string[] { "羽田", "空港" },
new int[] { 1, 1 });
}
@@ -846,7 +846,7 @@ public void TestWithPunctuation()
public void TestCompoundOverPunctuation()
{
AssertAnalyzesToPositions(analyzerNoPunct, "dεε϶ϢϏΎϷΞͺ羽田",
- new String[] { "d", "ε", "ε", "ϢϏΎϷΞͺ", "羽田" },
+ new string[] { "d", "ε", "ε", "ϢϏΎϷΞͺ", "羽田" },
new int[] { 1, 1, 1, 1, 1 },
new int[] { 1, 1, 1, 1, 1 });
}
@@ -858,7 +858,7 @@ public void TestCompoundOverPunctuation()
[Test]
public void TestEmptyBacktrace()
{
- String text = "";
+ string text = "";
// since the max backtrace gap ({@link JapaneseTokenizer#MAX_BACKTRACE_GAP)
// is set to 1024, we want the first 1023 characters to generate multiple paths
@@ -872,7 +872,7 @@ public void TestEmptyBacktrace()
// will end-up together
text += "手紙";
- IList<String> outputs = new List<String>();
+ IList<string> outputs = new List<string>();
for (int i = 0; i < 511; i++)
{
outputs.Add("ああ");
diff --git a/src/Lucene.Net.Tests.Analysis.Morfologik/Morfologik/TestMorfologikAnalyzer.cs b/src/Lucene.Net.Tests.Analysis.Morfologik/Morfologik/TestMorfologikAnalyzer.cs
index e65d3ea7c8..b4a11aff14 100644
--- a/src/Lucene.Net.Tests.Analysis.Morfologik/Morfologik/TestMorfologikAnalyzer.cs
+++ b/src/Lucene.Net.Tests.Analysis.Morfologik/Morfologik/TestMorfologikAnalyzer.cs
@@ -6,6 +6,7 @@
using Lucene.Net.Analysis.TokenAttributes;
using Lucene.Net.Analysis.Util;
using Lucene.Net.Support;
+using Lucene.Net.Util;
using NUnit.Framework;
using System;
using System.IO;
@@ -33,7 +34,7 @@ namespace Lucene.Net.Analysis.Morfologik
*/
///
- /// TODO: The tests below rely on the order of returned lemmas, which is probably not good.
+ /// TODO: The tests below rely on the order of returned lemmas, which is probably not good.
///
public class TestMorfologikAnalyzer : BaseTokenStreamTestCase
{
@@ -80,24 +81,33 @@ public void TestMultipleTokens()
private void dumpTokens(String input)
{
using Analyzer a = getTestAnalyzer();
- using TokenStream ts = a.GetTokenStream("dummy", input);
- ts.Reset();
+ TokenStream ts = a.GetTokenStream("dummy", input);
+ try
+ {
+ ts.Reset();
+
+ IMorphosyntacticTagsAttribute attribute = ts.GetAttribute<IMorphosyntacticTagsAttribute>();
+ ICharTermAttribute charTerm = ts.GetAttribute<ICharTermAttribute>();
+ while (ts.IncrementToken())
+ {
+ Console.WriteLine(charTerm.ToString() + " => " + string.Format(StringFormatter.InvariantCulture, "{0}", attribute.Tags));
+ }
- IMorphosyntacticTagsAttribute attribute = ts.GetAttribute<IMorphosyntacticTagsAttribute>();
- ICharTermAttribute charTerm = ts.GetAttribute<ICharTermAttribute>();
- while (ts.IncrementToken())
+ ts.End();
+ }
+ finally
{
- Console.WriteLine(charTerm.ToString() + " => " + string.Format(StringFormatter.InvariantCulture, "{0}", attribute.Tags));
+ IOUtils.CloseWhileHandlingException(ts);
}
- ts.End();
}
/** Test reuse of MorfologikFilter with leftover stems. */
[Test]
public void TestLeftoverStems()
{
- Analyzer a = getTestAnalyzer();
- using (TokenStream ts_1 = a.GetTokenStream("dummy", "liście"))
+ using Analyzer a = getTestAnalyzer();
+ TokenStream ts_1 = a.GetTokenStream("dummy", "liście");
+ try
{
ICharTermAttribute termAtt_1 = ts_1.GetAttribute<ICharTermAttribute>();
ts_1.Reset();
@@ -105,8 +115,13 @@ public void TestLeftoverStems()
assertEquals("first stream", "liście", termAtt_1.ToString());
ts_1.End();
}
+ finally
+ {
+ IOUtils.CloseWhileHandlingException(ts_1);
+ }
- using (TokenStream ts_2 = a.GetTokenStream("dummy", "danych"))
+ TokenStream ts_2 = a.GetTokenStream("dummy", "danych");
+ try
{
ICharTermAttribute termAtt_2 = ts_2.GetAttribute<ICharTermAttribute>();
ts_2.Reset();
@@ -114,14 +129,17 @@ public void TestLeftoverStems()
assertEquals("second stream", "dany", termAtt_2.toString());
ts_2.End();
}
- a.Dispose();
+ finally
+ {
+ IOUtils.CloseWhileHandlingException(ts_2);
+ }
}
/** Test stemming of mixed-case tokens. */
[Test]
public void TestCase()
{
- Analyzer a = getTestAnalyzer();
+ using Analyzer a = getTestAnalyzer();
AssertAnalyzesTo(a, "AGD", new String[] { "AGD", "artykuły gospodarstwa domowego" });
AssertAnalyzesTo(a, "agd", new String[] { "artykuły gospodarstwa domowego" });
@@ -133,7 +151,6 @@ public void TestCase()
AssertAnalyzesTo(a, "aarona", new String[] { "aarona" });
AssertAnalyzesTo(a, "Liście", new String[] { "liście", "liść", "list", "lista" });
- a.Dispose();
}
private void assertPOSToken(TokenStream ts, String term, params String[] tags)
@@ -166,26 +183,33 @@ private void assertPOSToken(TokenStream ts, String term, params String[] tags)
public void TestPOSAttribute()
{
using Analyzer a = getTestAnalyzer();
- using TokenStream ts = a.GetTokenStream("dummy", "liście");
- ts.Reset();
- assertPOSToken(ts, "liście",
- "subst:sg:acc:n2",
- "subst:sg:nom:n2",
- "subst:sg:voc:n2");
-
- assertPOSToken(ts, "liść",
- "subst:pl:acc:m3",
- "subst:pl:nom:m3",
- "subst:pl:voc:m3");
-
- assertPOSToken(ts, "list",
- "subst:sg:loc:m3",
- "subst:sg:voc:m3");
-
- assertPOSToken(ts, "lista",
- "subst:sg:dat:f",
- "subst:sg:loc:f");
- ts.End();
+ TokenStream ts = a.GetTokenStream("dummy", "liście");
+ try
+ {
+ ts.Reset();
+ assertPOSToken(ts, "liście",
+ "subst:sg:acc:n2",
+ "subst:sg:nom:n2",
+ "subst:sg:voc:n2");
+
+ assertPOSToken(ts, "liść",
+ "subst:pl:acc:m3",
+ "subst:pl:nom:m3",
+ "subst:pl:voc:m3");
+
+ assertPOSToken(ts, "list",
+ "subst:sg:loc:m3",
+ "subst:sg:voc:m3");
+
+ assertPOSToken(ts, "lista",
+ "subst:sg:dat:f",
+ "subst:sg:loc:f");
+ ts.End();
+ }
+ finally
+ {
+ IOUtils.CloseWhileHandlingException(ts);
+ }
}
private class MockMorfologikAnalyzer : MorfologikAnalyzer
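The rewrites in this file all converge on the same consumption idiom: Reset/IncrementToken/End inside a try block, with the stream closed in finally. A minimal sketch of that idiom (the field name and text are illustrative, not from this change set):

    TokenStream ts = analyzer.GetTokenStream("field", "some text");
    try
    {
        ICharTermAttribute termAtt = ts.GetAttribute<ICharTermAttribute>();
        ts.Reset();
        while (ts.IncrementToken())
        {
            Console.WriteLine(termAtt.ToString());
        }
        ts.End();
    }
    finally
    {
        // Close (not Dispose) so the analyzer can hand the stream out again
        IOUtils.CloseWhileHandlingException(ts);
    }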
diff --git a/src/Lucene.Net.Tests.Analysis.OpenNLP/TestOpenNLPTokenizerFactory.cs b/src/Lucene.Net.Tests.Analysis.OpenNLP/TestOpenNLPTokenizerFactory.cs
index 423094d4f1..f99c4f8d2a 100644
--- a/src/Lucene.Net.Tests.Analysis.OpenNLP/TestOpenNLPTokenizerFactory.cs
+++ b/src/Lucene.Net.Tests.Analysis.OpenNLP/TestOpenNLPTokenizerFactory.cs
@@ -123,11 +123,11 @@ public void TestClose()
//ts.SetReader(new StringReader(SENTENCES));
ts.Reset();
- ts.Dispose();
+ ts.Close();
ts.Reset();
ts.SetReader(new StringReader(SENTENCES));
AssertTokenStreamContents(ts, SENTENCES_punc);
- ts.Dispose();
+ ts.Close();
ts.Reset();
ts.SetReader(new StringReader(SENTENCES));
AssertTokenStreamContents(ts, SENTENCES_punc);
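The assertions above exercise the reuse contract that Close() restores: unlike Dispose(), Close() releases the current reader while leaving the Tokenizer reusable. A hedged sketch of the cycle (any Tokenizer; next is an illustrative string):

    tokenizer.Reset();
    // ... consume tokens ...
    tokenizer.End();
    tokenizer.Close();                           // releases the current reader
    tokenizer.SetReader(new StringReader(next)); // attach fresh input
    tokenizer.Reset();                           // stream is consumable again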
diff --git a/src/Lucene.Net.Tests.Analysis.Phonetic/TestBeiderMorseFilter.cs b/src/Lucene.Net.Tests.Analysis.Phonetic/TestBeiderMorseFilter.cs
index feb9756f92..abf6ca7699 100644
--- a/src/Lucene.Net.Tests.Analysis.Phonetic/TestBeiderMorseFilter.cs
+++ b/src/Lucene.Net.Tests.Analysis.Phonetic/TestBeiderMorseFilter.cs
@@ -127,7 +127,7 @@ public void TestCustomAttribute()
}
assertEquals(12, i);
stream.End();
- stream.Dispose();
+ stream.Close();
}
}
}
diff --git a/src/Lucene.Net.Tests.Analysis.SmartCn/TestSmartChineseAnalyzer.cs b/src/Lucene.Net.Tests.Analysis.SmartCn/TestSmartChineseAnalyzer.cs
index 703c961d0a..f048324ab2 100644
--- a/src/Lucene.Net.Tests.Analysis.SmartCn/TestSmartChineseAnalyzer.cs
+++ b/src/Lucene.Net.Tests.Analysis.SmartCn/TestSmartChineseAnalyzer.cs
@@ -273,7 +273,7 @@ public void TestLargeDocument()
}
finally
{
- IOUtils.DisposeWhileHandlingException(stream);
+ IOUtils.CloseWhileHandlingException(stream);
}
}
@@ -298,7 +298,7 @@ public void TestLargeSentence()
}
finally
{
- IOUtils.DisposeWhileHandlingException(stream);
+ IOUtils.CloseWhileHandlingException(stream);
}
}
diff --git a/src/Lucene.Net.Tests.Analysis.Stempel/Pl/TestPolishAnalyzer.cs b/src/Lucene.Net.Tests.Analysis.Stempel/Pl/TestPolishAnalyzer.cs
index 5a9ddeb997..8e2fcec5fe 100644
--- a/src/Lucene.Net.Tests.Analysis.Stempel/Pl/TestPolishAnalyzer.cs
+++ b/src/Lucene.Net.Tests.Analysis.Stempel/Pl/TestPolishAnalyzer.cs
@@ -1,4 +1,5 @@
using Lucene.Net.Analysis.Util;
+using Lucene.Net.Util;
using NUnit.Framework;
using System;
using System.IO;
@@ -25,7 +26,7 @@ namespace Lucene.Net.Analysis.Pl
public class TestPolishAnalyzer : BaseTokenStreamTestCase
{
///
- /// This test fails with NPE when the
+ /// This test fails with NPE when the
/// stopwords file is missing in classpath
///
[Test]
@@ -71,13 +72,13 @@ public void TestRandomStrings()
}
///
- /// LUCENENET specific. The original Java implementation relied on String.subSequence(int, int) to throw an IndexOutOfBoundsException
- /// (in .NET, it would be string.SubString(int, int) and an ArgumentOutOfRangeException).
- /// However, the logic was corrected for .NET to test when the argument is negative and not
+ /// LUCENENET specific. The original Java implementation relied on String.subSequence(int, int) to throw an IndexOutOfBoundsException
+ /// (in .NET, it would be string.SubString(int, int) and an ArgumentOutOfRangeException).
+ /// However, the logic was corrected for .NET to test when the argument is negative and not
/// throw an exception, since exceptions are expensive and not meant for "normal"
/// behavior in .NET. This test case was made trying to figure out that issue (since initially an IndexOutOfRangeException,
- /// rather than ArgumentOutOfRangeException, was in the catch block which made the TestRandomStrings test fail).
- /// It will trigger the behavior that causes the second substring argument to be negative
+ /// rather than ArgumentOutOfRangeException, was in the catch block which made the TestRandomStrings test fail).
+ /// It will trigger the behavior that causes the second substring argument to be negative
/// (although that behavior no longer throws an exception).
///
[Test]
@@ -87,14 +88,22 @@ public void TestOutOfRange()
var text = "zyaolz 96619727 p";
var reader = new StringReader(text);
int remainder = 2;
- using var ts = a.GetTokenStream("dummy", (TextReader)new MockCharFilter(reader, remainder));
- ts.Reset();
+ var ts = a.GetTokenStream("dummy", (TextReader)new MockCharFilter(reader, remainder));
- while (ts.IncrementToken())
+ try
{
- }
+ ts.Reset();
+
+ while (ts.IncrementToken())
+ {
+ }
- ts.End();
+ ts.End();
+ }
+ finally
+ {
+ IOUtils.CloseWhileHandlingException(ts);
+ }
}
}
}
diff --git a/src/Lucene.Net.Tests.Benchmark/ByTask/Tasks/CountingHighlighterTestTask.cs b/src/Lucene.Net.Tests.Benchmark/ByTask/Tasks/CountingHighlighterTestTask.cs
index db6b539b42..983d9e1d75 100644
--- a/src/Lucene.Net.Tests.Benchmark/ByTask/Tasks/CountingHighlighterTestTask.cs
+++ b/src/Lucene.Net.Tests.Benchmark/ByTask/Tasks/CountingHighlighterTestTask.cs
@@ -58,8 +58,7 @@ public BenchmarkHighlighterAnonymousClass(CountingHighlighterTestTask outerInsta
public override int DoHighlight(IndexReader reader, int doc, string field, Document document, Analyzer analyzer, string text)
{
TokenStream ts = TokenSources.GetAnyTokenStream(reader, doc, field, document, analyzer);
- TextFragment[]
- frag = highlighter.GetBestTextFragments(ts, text, outerInstance.m_mergeContiguous, outerInstance.m_maxFrags);
+ TextFragment[] frag = highlighter.GetBestTextFragments(ts, text, outerInstance.m_mergeContiguous, outerInstance.m_maxFrags);
numHighlightedResults += frag != null ? frag.Length : 0;
return frag != null ? frag.Length : 0;
}
@@ -71,7 +70,7 @@ protected override BenchmarkHighlighter GetBenchmarkHighlighter(Query q)
return new BenchmarkHighlighterAnonymousClass(this, m_highlighter);
// return new BenchmarkHighlighter() {
// @Override
- // public int doHighlight(IndexReader reader, int doc, String field, Document document, Analyzer analyzer, String text)
+ // public int doHighlight(IndexReader reader, int doc, String field, Document document, Analyzer analyzer, String text)
// {
// TokenStream ts = TokenSources.GetAnyTokenStream(reader, doc, field, document, analyzer);
// TextFragment []
diff --git a/src/Lucene.Net.Tests.Benchmark/ByTask/TestPerfTasksLogic.cs b/src/Lucene.Net.Tests.Benchmark/ByTask/TestPerfTasksLogic.cs
index 5e71b00046..1a623e77d1 100644
--- a/src/Lucene.Net.Tests.Benchmark/ByTask/TestPerfTasksLogic.cs
+++ b/src/Lucene.Net.Tests.Benchmark/ByTask/TestPerfTasksLogic.cs
@@ -63,7 +63,7 @@ public override void OneTimeSetUp()
public void TestIndexAndSearchTasks()
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = {
+ string[] algLines = {
"ResetSystemErase",
"CreateIndex",
"{ AddDoc } : 1000",
@@ -101,7 +101,7 @@ public void TestIndexAndSearchTasks()
[Test]
public void TestTimedSearchTask()
{
- String[] algLines = {
+ string[] algLines = {
"log.step=100000",
"ResetSystemErase",
"CreateIndex",
@@ -125,7 +125,7 @@ public void TestTimedSearchTask()
[Test]
public void TestBGSearchTaskThreads()
{
- String[] algLines = {
+ string[] algLines = {
"log.time.step.msec = 100",
"log.step=100000",
"ResetSystemErase",
@@ -156,7 +156,7 @@ public void TestBGSearchTaskThreads()
public void TestHighlighting()
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = {
+ string[] algLines = {
"doc.stored=true",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
"docs.file=" + getReuters20LinesFile(),
@@ -196,7 +196,7 @@ public void TestHighlighting()
public void TestHighlightingTV()
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = {
+ string[] algLines = {
"doc.stored=true",//doc storage is required in order to have text to highlight
"doc.term.vector=true",
"doc.term.vector.offsets=true",
@@ -238,7 +238,7 @@ public void TestHighlightingTV()
public void TestHighlightingNoTvNoStore()
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = {
+ string[] algLines = {
"doc.stored=false",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
"docs.file=" + getReuters20LinesFile(),
@@ -276,7 +276,7 @@ public void TestHighlightingNoTvNoStore()
public void TestExhaustContentSource()
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = {
+ string[] algLines = {
"# ----- properties ",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.SingleDocSource, Lucene.Net.Benchmark",
"content.source.log.step=1",
@@ -319,7 +319,7 @@ public void TestExhaustContentSource()
public void TestDocMakerThreadSafety()
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = {
+ string[] algLines = {
"# ----- properties ",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.SortableSingleDocSource, Lucene.Net.Benchmark",
"doc.term.vector=false",
@@ -360,7 +360,7 @@ public void TestDocMakerThreadSafety()
public void TestParallelDocMaker()
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = {
+ string[] algLines = {
"# ----- properties ",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
"docs.file=" + getReuters20LinesFile(),
@@ -398,7 +398,7 @@ public void TestLineDocFile()
int NUM_TRY_DOCS = 50;
// Creates a line file with first 50 docs from SingleDocSource
- String[] algLines1 = {
+ string[] algLines1 = {
"# ----- properties ",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.SingleDocSource, Lucene.Net.Benchmark",
"content.source.forever=true",
@@ -414,7 +414,7 @@ public void TestLineDocFile()
new StreamReader(
new FileStream(lineFile.FullName, FileMode.Open, FileAccess.Read), Encoding.UTF8);
int numLines = 0;
- String line;
+ string line;
while ((line = r.ReadLine()) != null)
{
if (numLines == 0 && line.StartsWith(WriteLineDocTask.FIELDS_HEADER_INDICATOR, StringComparison.Ordinal))
@@ -427,7 +427,7 @@ public void TestLineDocFile()
assertEquals("did not see the right number of docs; should be " + NUM_TRY_DOCS + " but was " + numLines, NUM_TRY_DOCS, numLines);
// Index the line docs
- String[] algLines2 = {
+ string[] algLines2 = {
"# ----- properties ",
"analyzer=Lucene.Net.Analysis.Core.WhitespaceAnalyzer, Lucene.Net.Analysis.Common",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
@@ -470,7 +470,7 @@ public void TestReadTokens()
// Read tokens from first NUM_DOCS docs from Reuters and
// then build index from the same docs
- String[] algLines1 = {
+ string[] algLines1 = {
"# ----- properties ",
"analyzer=Lucene.Net.Analysis.Core.WhitespaceAnalyzer, Lucene.Net.Analysis.Common",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
@@ -506,7 +506,7 @@ public void TestReadTokens()
Fields fields = MultiFields.GetFields(reader);
- foreach (String fieldName in fields)
+ foreach (string fieldName in fields)
{
if (fieldName.Equals(DocMaker.ID_FIELD, StringComparison.Ordinal) || fieldName.Equals(DocMaker.DATE_MSEC_FIELD, StringComparison.Ordinal) || fieldName.Equals(DocMaker.TIME_SEC_FIELD, StringComparison.Ordinal))
{
@@ -541,7 +541,7 @@ public void TestReadTokens()
public void TestParallelExhausted()
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = {
+ string[] algLines = {
"# ----- properties ",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
"docs.file=" + getReuters20LinesFile(),
@@ -579,7 +579,7 @@ public void TestParallelExhausted()
public void TestExhaustedLooped()
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = {
+ string[] algLines = {
"# ----- properties ",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
"docs.file=" + getReuters20LinesFile(),
@@ -617,7 +617,7 @@ public void TestExhaustedLooped()
public void TestCloseIndexFalse()
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = {
+ string[] algLines = {
"# ----- properties ",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
"docs.file=" + getReuters20LinesFile(),
@@ -666,7 +666,7 @@ public MyMergeScheduler()
public void TestMergeScheduler()
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = {
+ string[] algLines = {
"# ----- properties ",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
"docs.file=" + getReuters20LinesFile(),
@@ -716,7 +716,7 @@ public MyMergePolicy()
public void TestMergePolicy()
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = {
+ string[] algLines = {
"# ----- properties ",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
"docs.file=" + getReuters20LinesFile(),
@@ -757,7 +757,7 @@ public void TestMergePolicy()
public void TestIndexWriterSettings()
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = {
+ string[] algLines = {
"# ----- properties ",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
"docs.file=" + getReuters20LinesFile(),
@@ -804,7 +804,7 @@ public void TestIndexWriterSettings()
public void TestIndexingWithFacets()
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = {
+ string[] algLines = {
"# ----- properties ",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
"docs.file=" + getReuters20LinesFile(),
@@ -842,7 +842,7 @@ public void TestIndexingWithFacets()
public void TestForceMerge()
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = {
+ string[] algLines = {
"# ----- properties ",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
"docs.file=" + getReuters20LinesFile(),
@@ -894,7 +894,7 @@ public void TestDisableCounting()
private void doTestDisableCounting(bool disable)
{
// 1. alg definition (required in every "logic" test)
- String[] algLines = disableCountingLines(disable);
+ string[] algLines = disableCountingLines(disable);
// 2. execute the algorithm (required in every "logic" test)
Benchmark benchmark = execBenchmark(algLines);
@@ -904,7 +904,7 @@ private void doTestDisableCounting(bool disable)
int nChecked = 0;
foreach (TaskStats stats in benchmark.RunData.Points.TaskStats)
{
- String taskName = stats.Task.GetName();
+ string taskName = stats.Task.GetName();
if (taskName.Equals("Rounds", StringComparison.Ordinal))
{
assertEquals("Wrong total count!", 20 + 2 * n, stats.Count);
@@ -924,10 +924,10 @@ private void doTestDisableCounting(bool disable)
assertEquals("Missing some tasks to check!", 3, nChecked);
}
- private String[] disableCountingLines(bool disable)
+ private string[] disableCountingLines(bool disable)
{
- String dis = disable ? "-" : "";
- return new String[] {
+ string dis = disable ? "-" : "";
+ return new string[] {
"# ----- properties ",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
"docs.file=" + getReuters20LinesFile(),
@@ -983,9 +983,9 @@ public void TestLocale()
assertEquals(new CultureInfo("nb-NO"/*, "NY"*/), benchmark.RunData.Locale);
}
- private String[] getLocaleConfig(String localeParam)
+ private string[] getLocaleConfig(string localeParam)
{
- String[] algLines = {
+ string[] algLines = {
"# ----- properties ",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
"docs.file=" + getReuters20LinesFile(),
@@ -1045,7 +1045,7 @@ public void TestCollator()
assertEqualCollation(expected, benchmark.RunData.Analyzer, "foobar");
}
- private void assertEqualCollation(Analyzer a1, Analyzer a2, String text)
+ private void assertEqualCollation(Analyzer a1, Analyzer a2, string text)
{
TokenStream ts1 = a1.GetTokenStream("bogus", text);
TokenStream ts2 = a2.GetTokenStream("bogus", text);
@@ -1062,14 +1062,14 @@ private void assertEqualCollation(Analyzer a1, Analyzer a2, String text)
assertEquals(bytes1, bytes2);
assertFalse(ts1.IncrementToken());
assertFalse(ts2.IncrementToken());
- ts1.Dispose();
- ts2.Dispose();
+ ts1.Close();
+ ts2.Close();
}
- private String[] getCollatorConfig(String localeParam,
- String collationParam)
+ private string[] getCollatorConfig(string localeParam,
+ string collationParam)
{
- String[] algLines = {
+ string[] algLines = {
"# ----- properties ",
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
"docs.file=" + getReuters20LinesFile(),
@@ -1095,15 +1095,15 @@ private String[] getCollatorConfig(String localeParam,
[Test]
public void TestShingleAnalyzer()
{
- String text = "one,two,three, four five six";
+ string text = "one,two,three, four five six";
// StandardTokenizer, maxShingleSize, and outputUnigrams
Benchmark benchmark = execBenchmark(getAnalyzerFactoryConfig
("shingle-analyzer", "StandardTokenizer,ShingleFilter"));
benchmark.RunData.Analyzer.GetTokenStream
- ("bogus", text).Dispose();
+ ("bogus", text).Close();
BaseTokenStreamTestCase.AssertAnalyzesTo(benchmark.RunData.Analyzer, text,
- new String[] { "one", "one two", "two", "two three",
+ new string[] { "one", "one two", "two", "two three",
"three", "three four", "four", "four five",
"five", "five six", "six" });
// StandardTokenizer, maxShingleSize = 3, and outputUnigrams = false
@@ -1112,7 +1112,7 @@ public void TestShingleAnalyzer()
("shingle-analyzer",
"StandardTokenizer,ShingleFilter(maxShingleSize:3,outputUnigrams:false)"));
BaseTokenStreamTestCase.AssertAnalyzesTo(benchmark.RunData.Analyzer, text,
- new String[] { "one two", "one two three", "two three",
+ new string[] { "one two", "one two three", "two three",
"two three four", "three four",
"three four five", "four five",
"four five six", "five six" });
@@ -1120,7 +1120,7 @@ public void TestShingleAnalyzer()
benchmark = execBenchmark
(getAnalyzerFactoryConfig("shingle-analyzer", "WhitespaceTokenizer,ShingleFilter"));
BaseTokenStreamTestCase.AssertAnalyzesTo(benchmark.RunData.Analyzer, text,
- new String[] { "one,two,three,", "one,two,three, four",
+ new string[] { "one,two,three,", "one,two,three, four",
"four", "four five", "five", "five six",
"six" });
@@ -1130,13 +1130,13 @@ public void TestShingleAnalyzer()
("shingle-factory",
"WhitespaceTokenizer,ShingleFilter(outputUnigrams:false,maxShingleSize:3)"));
BaseTokenStreamTestCase.AssertAnalyzesTo(benchmark.RunData.Analyzer, text,
- new String[] { "one,two,three, four",
+ new string[] { "one,two,three, four",
"one,two,three, four five",
"four five", "four five six",
"five six" });
}
- private String[] getAnalyzerFactoryConfig(String name, String @params)
+ private string[] getAnalyzerFactoryConfig(string name, string @params)
{
//String singleQuoteEscapedName = name.Replace("'", "\\\\'");
//String[] algLines = {
@@ -1151,8 +1151,8 @@ private String[] getAnalyzerFactoryConfig(String name, String @params)
// "{ \"AddDocs\" AddDoc > : * "
//};
//String singleQuoteEscapedName = name.Replace("'", @"\'");
- String singleQuoteEscapedName = name.Replace("'", @"\'");
- String[] algLines = {
+ string singleQuoteEscapedName = name.Replace("'", @"\'");
+ string[] algLines = {
"content.source=Lucene.Net.Benchmarks.ByTask.Feeds.LineDocSource, Lucene.Net.Benchmark",
"docs.file=" + getReuters20LinesFile(),
"work.dir=" + getWorkDir().FullName.Replace(@"\", "/"), // Fix Windows path
@@ -1169,7 +1169,7 @@ private String[] getAnalyzerFactoryConfig(String name, String @params)
[Test]
public void TestAnalyzerFactory()
{
- String text = "Fortieth, Quarantième, Cuadragésimo";
+ string text = "Fortieth, Quarantième, Cuadragésimo";
Benchmark benchmark = execBenchmark(getAnalyzerFactoryConfig
("ascii folded, pattern replaced, standard tokenized, downcased, bigrammed.'analyzer'",
"positionIncrementGap:100,offsetGap:1111,"
@@ -1177,12 +1177,12 @@ public void TestAnalyzerFactory()
+ "PatternReplaceCharFilterFactory(pattern:'e(\\\\\\\\S*)m',replacement:\"$1xxx$1\"),"
+ "StandardTokenizer,LowerCaseFilter,NGramTokenFilter(minGramSize:2,maxGramSize:2)"));
BaseTokenStreamTestCase.AssertAnalyzesTo(benchmark.RunData.Analyzer, text,
- new String[] { "fo", "or", "rt", "ti", "ie", "et", "th",
+ new string[] { "fo", "or", "rt", "ti", "ie", "et", "th",
"qu", "ua", "ar", "ra", "an", "nt", "ti", "ix", "xx", "xx", "xe",
"cu", "ua", "ad", "dr", "ra", "ag", "gs", "si", "ix", "xx", "xx", "xs", "si", "io"});
}
- private String getReuters20LinesFile()
+ private string getReuters20LinesFile()
{
return getWorkDirResourcePath("reuters.first20.lines.txt");
}
diff --git a/src/Lucene.Net.Tests.Highlighter/Highlight/HighlighterTest.cs b/src/Lucene.Net.Tests.Highlighter/Highlight/HighlighterTest.cs
index 23b608f53f..cd2d1285c2 100644
--- a/src/Lucene.Net.Tests.Highlighter/Highlight/HighlighterTest.cs
+++ b/src/Lucene.Net.Tests.Highlighter/Highlight/HighlighterTest.cs
@@ -1305,6 +1305,7 @@ public void TestGetTextFragments()
for (int i = 0; i < hits.TotalHits; i++)
{
String text = searcher.Doc(hits.ScoreDocs[i].Doc).Get(FIELD_NAME);
+ // TODO: #271 review disposable
TokenStream tokenStream = analyzer.GetTokenStream(FIELD_NAME, text);
Highlighter highlighter = instance.GetHighlighter(query, FIELD_NAME,
@@ -2166,21 +2167,12 @@ public override void End()
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- try
- {
- if (disposing)
- {
- this.realStream.Dispose();
- this.st?.Dispose();
- this.st = null;
- }
- }
- finally
- {
- base.Dispose(disposing);
- }
+ this.realStream.Close();
+ this.st?.Dispose();
+ this.st = null;
+ base.Close();
}
}
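The override above shows the pattern for streams that own extra resources: release and null them inside Close() so a second call is harmless, then chain to the base class. A hypothetical standalone version (WrappingTokenStream and its helper field are illustrative):

    public sealed class WrappingTokenStream : TokenStream
    {
        private readonly TokenStream realStream;
        private StringReader helper; // an owned, non-TokenStream resource

        public WrappingTokenStream(TokenStream realStream)
        {
            this.realStream = realStream;
        }

        public override bool IncrementToken() => realStream.IncrementToken();

        public override void Close()
        {
            realStream.Close(); // chain to the wrapped stream
            helper?.Dispose();  // release owned resources...
            helper = null;      // ...and null them so Close() stays idempotent
            base.Close();
        }
    }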
diff --git a/src/Lucene.Net.Tests.Highlighter/VectorHighlight/AbstractTestCase.cs b/src/Lucene.Net.Tests.Highlighter/VectorHighlight/AbstractTestCase.cs
index 80d9fbb5e5..1e3cf6530d 100644
--- a/src/Lucene.Net.Tests.Highlighter/VectorHighlight/AbstractTestCase.cs
+++ b/src/Lucene.Net.Tests.Highlighter/VectorHighlight/AbstractTestCase.cs
@@ -193,7 +193,7 @@ protected IList<BytesRef> analyze(String text, String field, Analyzer analyzer)
}
finally
{
- IOUtils.DisposeWhileHandlingException(tokenStream);
+ IOUtils.CloseWhileHandlingException(tokenStream);
}
return bytesRefs;
diff --git a/src/Lucene.Net.Tests.Suggest/Suggest/Analyzing/AnalyzingInfixSuggesterTest.cs b/src/Lucene.Net.Tests.Suggest/Suggest/Analyzing/AnalyzingInfixSuggesterTest.cs
index 88d333ab5c..ec1dacff3c 100644
--- a/src/Lucene.Net.Tests.Suggest/Suggest/Analyzing/AnalyzingInfixSuggesterTest.cs
+++ b/src/Lucene.Net.Tests.Suggest/Suggest/Analyzing/AnalyzingInfixSuggesterTest.cs
@@ -117,7 +117,7 @@ public void TestAfterLoad()
}
///
- /// Used to return highlighted result; see
+ /// Used to return highlighted result; see
///
///
private sealed class LookupHighlightFragment
@@ -201,7 +201,7 @@ protected internal override object Highlight(string text, ICollection ma
}
finally
{
- IOUtils.DisposeWhileHandlingException(ts);
+ IOUtils.CloseWhileHandlingException(ts);
}
}
}
@@ -445,7 +445,7 @@ public void TestSuggestStopFilter()
public void TestEmptyAtStart()
{
Analyzer a = new MockAnalyzer(Random, MockTokenizer.WHITESPACE, false);
- using AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, NewDirectory(), a, a, 3); //LUCENENET UPGRADE TODO: add extra false param at version 4.11.0
+ using AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, NewDirectory(), a, a, 3); //LUCENENET UPGRADE TODO: add extra false param at version 4.11.0
suggester.Build(new InputArrayEnumerator(new Input[0]));
suggester.Add(new BytesRef("a penny saved is a penny earned"), null, 10, new BytesRef("foobaz"));
suggester.Add(new BytesRef("lend me your ear"), null, 8, new BytesRef("foobar"));
diff --git a/src/Lucene.Net.Tests.TestFramework/Analysis/TestMockAnalyzer.cs b/src/Lucene.Net.Tests.TestFramework/Analysis/TestMockAnalyzer.cs
index bbf25b2de8..2628dfadc6 100644
--- a/src/Lucene.Net.Tests.TestFramework/Analysis/TestMockAnalyzer.cs
+++ b/src/Lucene.Net.Tests.TestFramework/Analysis/TestMockAnalyzer.cs
@@ -217,15 +217,27 @@ public void TestLUCENE_3042()
String testString = "t";
Analyzer analyzer = new MockAnalyzer(Random);
- using (TokenStream stream = analyzer.GetTokenStream("dummy", testString))
+ Exception priorException = null;
+ TokenStream stream = analyzer.GetTokenStream("dummy", testString);
+
+ try
{
stream.Reset();
while (stream.IncrementToken())
{
// consume
}
+
stream.End();
}
+ catch (Exception e)
+ {
+ priorException = e;
+ }
+ finally
+ {
+ IOUtils.CloseWhileHandlingException(priorException, stream);
+ }
AssertAnalyzesTo(analyzer, testString, new String[] { "t" });
}
@@ -269,13 +281,26 @@ public void TestForwardOffsets()
StringReader reader = new StringReader(s);
MockCharFilter charfilter = new MockCharFilter(reader, 2);
MockAnalyzer analyzer = new MockAnalyzer(Random);
- using TokenStream ts = analyzer.GetTokenStream("bogus", charfilter);
- ts.Reset();
- while (ts.IncrementToken())
+ Exception priorException = null;
+ TokenStream ts = analyzer.GetTokenStream("bogus", charfilter);
+ try
+ {
+ ts.Reset();
+ while (ts.IncrementToken())
+ {
+ ;
+ }
+
+ ts.End();
+ }
+ catch (Exception e)
+ {
+ priorException = e;
+ }
+ finally
{
- ;
+ IOUtils.CloseWhileHandlingException(priorException, ts);
}
- ts.End();
}
}
@@ -314,7 +339,7 @@ public void TestWrapReader()
// LUCENENET NOTE: This has some compatibility issues with Lucene 4.8.1, but need this test when
// DelegatingAnalyzerWrapper is ported
//[Test]
- //public void TestChangeGaps()
+ //public void TestChangeGaps()
//{
// // LUCENE-5324: check that it is possible to change the wrapper's gaps
// int positionGap = Random.nextInt(1000);
diff --git a/src/Lucene.Net.Tests/Analysis/TestMockAnalyzer.cs b/src/Lucene.Net.Tests/Analysis/TestMockAnalyzer.cs
index 89478b2f81..bbb77b723a 100644
--- a/src/Lucene.Net.Tests/Analysis/TestMockAnalyzer.cs
+++ b/src/Lucene.Net.Tests/Analysis/TestMockAnalyzer.cs
@@ -263,7 +263,7 @@ public virtual void TestLUCENE_3042()
}
finally
{
- IOUtils.DisposeWhileHandlingException(priorException, stream);
+ IOUtils.CloseWhileHandlingException(priorException, stream);
}
AssertAnalyzesTo(analyzer, testString, new string[] { "t" });
@@ -327,7 +327,7 @@ public virtual void TestForwardOffsets()
}
finally
{
- IOUtils.DisposeWhileHandlingException(priorException, ts);
+ IOUtils.CloseWhileHandlingException(priorException, ts);
}
}
}
diff --git a/src/Lucene.Net.Tests/Analysis/TestNumericTokenStream.cs b/src/Lucene.Net.Tests/Analysis/TestNumericTokenStream.cs
index 9a360c9254..b046744ec3 100644
--- a/src/Lucene.Net.Tests/Analysis/TestNumericTokenStream.cs
+++ b/src/Lucene.Net.Tests/Analysis/TestNumericTokenStream.cs
@@ -36,7 +36,7 @@ public class TestNumericTokenStream : BaseTokenStreamTestCase
[Test]
public virtual void TestLongStream()
{
- using NumericTokenStream stream = new NumericTokenStream().SetInt64Value(lvalue);
+ NumericTokenStream stream = new NumericTokenStream().SetInt64Value(lvalue);
// use GetAttribute to test if attributes really exist; if not, an IAE will be thrown
ITermToBytesRefAttribute bytesAtt = stream.GetAttribute<ITermToBytesRefAttribute>();
ITypeAttribute typeAtt = stream.GetAttribute<ITypeAttribute>();
@@ -55,14 +55,13 @@ public virtual void TestLongStream()
}
Assert.IsFalse(stream.IncrementToken(), "More tokens available");
stream.End();
- // LUCENENET specific - stream disposed above via using statement
- // stream.Dispose();
+ stream.Close();
}
[Test]
public virtual void TestIntStream()
{
- using NumericTokenStream stream = new NumericTokenStream().SetInt32Value(ivalue);
+ NumericTokenStream stream = new NumericTokenStream().SetInt32Value(ivalue);
// use GetAttribute to test if attributes really exist; if not, an IAE will be thrown
ITermToBytesRefAttribute bytesAtt = stream.GetAttribute<ITermToBytesRefAttribute>();
ITypeAttribute typeAtt = stream.GetAttribute<ITypeAttribute>();
@@ -81,8 +80,7 @@ public virtual void TestIntStream()
}
Assert.IsFalse(stream.IncrementToken(), "More tokens available");
stream.End();
- // LUCENENET specific - stream disposed above via using statement
- // stream.Dispose();
+ stream.Close();
}
[Test]
diff --git a/src/Lucene.Net.Tests/Index/TestLongPostings.cs b/src/Lucene.Net.Tests/Index/TestLongPostings.cs
index 28b8355122..576db24d8f 100644
--- a/src/Lucene.Net.Tests/Index/TestLongPostings.cs
+++ b/src/Lucene.Net.Tests/Index/TestLongPostings.cs
@@ -95,7 +95,7 @@ private string GetRandomTerm(string other)
}
finally
{
- IOUtils.DisposeWhileHandlingException(priorException, ts);
+ IOUtils.CloseWhileHandlingException(priorException, ts);
}
}
}
diff --git a/src/Lucene.Net.Tests/Index/TestPayloads.cs b/src/Lucene.Net.Tests/Index/TestPayloads.cs
index e35cc6aa5f..a6fe317000 100644
--- a/src/Lucene.Net.Tests/Index/TestPayloads.cs
+++ b/src/Lucene.Net.Tests/Index/TestPayloads.cs
@@ -597,12 +597,9 @@ public sealed override bool IncrementToken()
return true;
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- if (disposing)
- {
- pool.Release(payload);
- }
+ pool.Release(payload);
}
}
diff --git a/src/Lucene.Net.Tests/Index/TestTermVectorsWriter.cs b/src/Lucene.Net.Tests/Index/TestTermVectorsWriter.cs
index 8267833fec..b2f9efc84f 100644
--- a/src/Lucene.Net.Tests/Index/TestTermVectorsWriter.cs
+++ b/src/Lucene.Net.Tests/Index/TestTermVectorsWriter.cs
@@ -213,7 +213,7 @@ public virtual void TestEndOffsetPositionWithCachingTokenFilter()
}
finally
{
- IOUtils.DisposeWhileHandlingException(priorException, stream);
+ IOUtils.CloseWhileHandlingException(priorException, stream);
}
w.Dispose();
diff --git a/src/Lucene.Net.Tests/Search/TestPhraseQuery.cs b/src/Lucene.Net.Tests/Search/TestPhraseQuery.cs
index c97e022293..cb668780d6 100644
--- a/src/Lucene.Net.Tests/Search/TestPhraseQuery.cs
+++ b/src/Lucene.Net.Tests/Search/TestPhraseQuery.cs
@@ -678,7 +678,7 @@ public virtual void TestRandomPhrases()
}
finally
{
- IOUtils.DisposeWhileHandlingException(priorException, ts);
+ IOUtils.CloseWhileHandlingException(priorException, ts);
}
}
else
diff --git a/src/Lucene.Net/Analysis/Analyzer.cs b/src/Lucene.Net/Analysis/Analyzer.cs
index 8e8a4d7949..8477d74d6d 100644
--- a/src/Lucene.Net/Analysis/Analyzer.cs
+++ b/src/Lucene.Net/Analysis/Analyzer.cs
@@ -1,4 +1,5 @@
-using Lucene.Net.Util;
+using Lucene.Net.Index;
+using Lucene.Net.Util;
using System;
using System.Collections.Generic;
using System.IO;
@@ -34,7 +35,7 @@ namespace Lucene.Net.Analysis
///
/// Simple example:
///
- /// Analyzer analyzer = Analyzer.NewAnonymous(createComponents: (fieldName, reader) =>
+ /// Analyzer analyzer = Analyzer.NewAnonymous(createComponents: (fieldName, reader) =>
/// {
/// Tokenizer source = new FooTokenizer(reader);
/// TokenStream filter = new FooFilter(source);
@@ -96,9 +97,9 @@ protected Analyzer(ReuseStrategy reuseStrategy) // LUCENENET: CA1012: Abstract t
///
/// Creates a new instance with the ability to specify the body of the
/// method through the parameter.
- /// Simple example:
+ /// Simple example:
///
- /// var analyzer = Analyzer.NewAnonymous(createComponents: (fieldName, reader) =>
+ /// var analyzer = Analyzer.NewAnonymous(createComponents: (fieldName, reader) =>
/// {
/// Tokenizer source = new FooTokenizer(reader);
/// TokenStream filter = new FooFilter(source);
@@ -110,8 +111,8 @@ protected Analyzer(ReuseStrategy reuseStrategy) // LUCENENET: CA1012: Abstract t
/// LUCENENET specific
///
///
- /// A delegate method that represents (is called by) the
- /// method. It accepts a fieldName and a reader and
+ /// A delegate method that represents (is called by) the
+ /// method. It accepts a fieldName and a reader and
/// returns the for this analyzer.
///
/// A new instance.
@@ -123,9 +124,9 @@ public static Analyzer NewAnonymous(Func
/// Creates a new instance with the ability to specify the body of the
/// method through the parameter and allows the use of a .
- /// Simple example:
+ /// Simple example:
///
- /// var analyzer = Analyzer.NewAnonymous(createComponents: (fieldName, reader) =>
+ /// var analyzer = Analyzer.NewAnonymous(createComponents: (fieldName, reader) =>
/// {
/// Tokenizer source = new FooTokenizer(reader);
/// TokenStream filter = new FooFilter(source);
@@ -137,8 +138,8 @@ public static Analyzer NewAnonymous(Func
///
- /// A delegate method that represents (is called by) the
- /// method. It accepts a fieldName and a reader and
+ /// A delegate method that represents (is called by) the
+ /// method. It accepts a fieldName and a reader and
/// returns the for this analyzer.
///
/// A custom instance.
@@ -152,15 +153,15 @@ public static Analyzer NewAnonymous(Func
/// method through the parameter and the body of the
/// method through the parameter.
- /// Simple example:
+ /// Simple example:
///
- /// var analyzer = Analyzer.NewAnonymous(createComponents: (fieldName, reader) =>
+ /// var analyzer = Analyzer.NewAnonymous(createComponents: (fieldName, reader) =>
/// {
/// Tokenizer source = new FooTokenizer(reader);
/// TokenStream filter = new FooFilter(source);
/// filter = new BarFilter(filter);
/// return new TokenStreamComponents(source, filter);
- /// }, initReader: (fieldName, reader) =>
+ /// }, initReader: (fieldName, reader) =>
/// {
/// return new HTMLStripCharFilter(reader);
/// });
@@ -169,12 +170,12 @@ public static Analyzer NewAnonymous(Func
///
- /// A delegate method that represents (is called by) the
- /// method. It accepts a fieldName and a reader and
+ /// A delegate method that represents (is called by) the
+ /// method. It accepts a fieldName and a reader and
/// returns the for this analyzer.
///
/// A delegate method that represents (is called by) the
- /// method. It accepts a fieldName and a reader and
+ /// method. It accepts a fieldName and a reader and
/// returns the that can be modified or wrapped by the method.
/// A new instance.
public static Analyzer NewAnonymous(Func createComponents, Func initReader)
@@ -186,15 +187,15 @@ public static Analyzer NewAnonymous(Func
/// method through the parameter, the body of the
/// method through the parameter, and allows the use of a .
- /// Simple example:
+ /// Simple example:
///
- /// var analyzer = Analyzer.NewAnonymous(createComponents: (fieldName, reader) =>
+ /// var analyzer = Analyzer.NewAnonymous(createComponents: (fieldName, reader) =>
/// {
/// Tokenizer source = new FooTokenizer(reader);
/// TokenStream filter = new FooFilter(source);
/// filter = new BarFilter(filter);
/// return new TokenStreamComponents(source, filter);
- /// }, initReader: (fieldName, reader) =>
+ /// }, initReader: (fieldName, reader) =>
/// {
/// return new HTMLStripCharFilter(reader);
/// }, reuseStrategy);
@@ -203,12 +204,12 @@ public static Analyzer NewAnonymous(Func
///
- /// A delegate method that represents (is called by) the
- /// method. It accepts a fieldName and a reader and
+ /// A delegate method that represents (is called by) the
+ /// method. It accepts a fieldName and a reader and
/// returns the for this analyzer.
///
/// A delegate method that represents (is called by) the
- /// method. It accepts a fieldName and a reader and
+ /// method. It accepts a fieldName and a reader and
/// returns the that can be modified or wrapped by the method.
/// A custom instance.
/// A new instance.
@@ -275,7 +276,7 @@ public TokenStream GetTokenStream(string fieldName, TextReader reader)
/// method will reuse the previously stored components after resetting them
/// through .
///
- /// NOTE: After calling this method, the consumer must follow the
+ /// NOTE: After calling this method, the consumer must follow the
/// workflow described in to properly consume its contents.
/// See the namespace documentation for
/// some examples demonstrating this.
@@ -314,7 +315,7 @@ public TokenStream GetTokenStream(string fieldName, string text)
/// The default implementation returns
/// unchanged.
///
- /// name being indexed
+ /// name being indexed
/// original
/// reader, optionally decorated with (s)
protected internal virtual TextReader InitReader(string fieldName, TextReader reader)
@@ -323,16 +324,16 @@ protected internal virtual TextReader InitReader(string fieldName, TextReader re
}
///
- /// Invoked before indexing a instance if
+ /// Invoked before indexing a instance if
/// terms have already been added to that field. This allows custom
/// analyzers to place an automatic position increment gap between
- /// instances using the same field name. The default value
+ /// instances using the same field name. The default value
/// position increment gap is 0. With a 0 position increment gap and
/// the typical default token position increment of 1, all terms in a field,
- /// including across instances, are in successive positions, allowing
- /// exact matches, for instance, across instance boundaries.
+ /// including across instances, are in successive positions, allowing
+ /// exact matches, for instance, across instance boundaries.
///
- /// name being indexed.
+ /// name being indexed.
/// position increment gap, added to the next token emitted from .
/// this value must be >= 0.
public virtual int GetPositionIncrementGap(string fieldName)
@@ -360,7 +361,7 @@ public virtual int GetOffsetGap(string fieldName)
public ReuseStrategy Strategy => reuseStrategy;
///
- /// Frees persistent resources used by this
+ /// Frees persistent resources used by this
///
public void Dispose()
{
@@ -369,7 +370,7 @@ public void Dispose()
}
///
- /// Frees persistent resources used by this
+ /// Frees persistent resources used by this
///
protected virtual void Dispose(bool disposing)
{
@@ -459,18 +460,35 @@ public override void SetReusableComponents(Analyzer analyzer, string fieldName,
if (componentsPerField is null)
{
// LUCENENET-615: This needs to support nullable keys
- componentsPerField = new JCG.Dictionary<string, TokenStreamComponents>();
+ componentsPerField = new TokenStreamComponentsDictionary();
SetStoredValue(analyzer, componentsPerField);
}
componentsPerField[fieldName] = components;
}
+
+ ///
+ /// A dictionary that supports disposing of the values when the dictionary is disposed.
+ ///
+ ///
+ private class TokenStreamComponentsDictionary
+ : JCG.Dictionary<string, TokenStreamComponents>, IDisposable
+ {
+ public void Dispose()
+ {
+ foreach (var kvp in this)
+ {
+ kvp.Value?.Dispose();
+ }
+ Clear();
+ }
+ }
}
///
/// LUCENENET specific helper class to mimic Java's ability to create anonymous classes.
/// Clearly, the design of took this feature of Java into consideration.
/// Since it doesn't exist in .NET, we can use a delegate method to call the constructor of
- /// this concrete instance to fake it (by calling an overload of
+ /// this concrete instance to fake it (by calling an overload of
/// ).
///
private class AnonymousAnalyzer : Analyzer
@@ -508,7 +526,17 @@ protected internal override TextReader InitReader(string fieldName, TextReader r
/// returned by
/// .
///
- public class TokenStreamComponents
+ ///
+ /// LUCENENET: This class implements IDisposable so that any TokenStream implementations
+ /// that need to be disposed are disposed when the Analyzer that stores this in its
+ /// stored value is disposed.
+ ///
+ /// Because it's impossible to know if the sink TokenStream would dispose of the source Tokenizer,
+ /// this class calls Dispose() on both if they are not reference equal.
+ /// Implementations of Dispose() should be careful to make their
+ /// code idempotent so that calling Dispose() multiple times has no effect.
+ ///
+ public class TokenStreamComponents : IDisposable
{
///
/// Original source of the tokens.
@@ -573,6 +601,25 @@ protected internal virtual void SetReader(TextReader reader)
///
/// Component's
public virtual Tokenizer Tokenizer => m_source;
+
+ ///
+ /// Disposes of the source Tokenizer and the sink TokenStream.
+ ///
+ ///
+ /// LUCENENET specific: see remarks on the class.
+ ///
+ public void Dispose()
+ {
+ m_source?.Dispose();
+
+ if (!ReferenceEquals(m_source, m_sink))
+ {
+ m_sink?.Dispose();
+ }
+
+ reusableStringReader?.Dispose();
+ GC.SuppressFinalize(this);
+ }
}
///
@@ -631,4 +678,4 @@ protected internal static void SetStoredValue(Analyzer analyzer, object storedVa
analyzer.storedValue.Value = storedValue;
}
}
-}
\ No newline at end of file
+}
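Taken together, the stored-value dictionary and the new TokenStreamComponents.Dispose() mean that disposing an Analyzer now also tears down whatever components it cached per field. A hedged usage sketch (CreateAnalyzer() is a hypothetical factory):

    using (Analyzer analyzer = CreateAnalyzer())
    {
        TokenStream ts = analyzer.GetTokenStream("field", "some text");
        // ... Reset/IncrementToken/End, then ts.Close() ...
    }
    // Leaving the block calls Analyzer.Dispose(), which disposes the stored
    // TokenStreamComponentsDictionary and, through it, each cached field's
    // Tokenizer/sink pair; idempotent Dispose() keeps double-disposal safe.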
diff --git a/src/Lucene.Net/Analysis/CachingTokenFilter.cs b/src/Lucene.Net/Analysis/CachingTokenFilter.cs
index 41e321972a..3bf71d65e1 100644
--- a/src/Lucene.Net/Analysis/CachingTokenFilter.cs
+++ b/src/Lucene.Net/Analysis/CachingTokenFilter.cs
@@ -102,27 +102,16 @@ private void FillCache()
}
///
- /// Releases resources used by the and
- /// if overridden in a derived class, optionally releases unmanaged resources.
+ /// Releases resources used by the CachingTokenFilter.
///
- /// true to release both managed and unmanaged resources;
- /// false to release only unmanaged resources.
-
- // LUCENENET specific
- protected override void Dispose(bool disposing)
+ ///
+ /// LUCENENET specific
+ ///
+ public override void Close()
{
- try
- {
- if (disposing)
- {
- iterator?.Dispose();
- iterator = null;
- }
- }
- finally
- {
- base.Dispose(disposing);
- }
+ iterator?.Dispose();
+ iterator = null;
+ base.Close();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Analysis/TokenFilter.cs b/src/Lucene.Net/Analysis/TokenFilter.cs
index fc5630a943..b43bb2a52d 100644
--- a/src/Lucene.Net/Analysis/TokenFilter.cs
+++ b/src/Lucene.Net/Analysis/TokenFilter.cs
@@ -1,5 +1,4 @@
-using System;
-using System.IO;
+using System.IO;
namespace Lucene.Net.Analysis
{
@@ -29,11 +28,13 @@ namespace Lucene.Net.Analysis
public abstract class TokenFilter : TokenStream
{
///
- /// The source of tokens for this filter.
+ /// The source of tokens for this filter.
+ ///
protected readonly TokenStream m_input;
///
- /// Construct a token stream filtering the given input.
+ /// Construct a token stream filtering the given input.
+ ///
protected TokenFilter(TokenStream input)
: base(input)
{
@@ -41,24 +42,13 @@ protected TokenFilter(TokenStream input)
}
///
- /// This method is called by the consumer after the last token has been
- /// consumed, after returned false
- /// (using the new API). Streams implementing the old API
- /// should upgrade to use this feature.
- ///
- /// This method can be used to perform any end-of-stream operations, such as
- /// setting the final offset of a stream. The final offset of a stream might
- /// differ from the offset of the last token eg in case one or more whitespaces
- /// followed after the last token, but a WhitespaceTokenizer was used.
- ///
- /// Additionally any skipped positions (such as those removed by a stopfilter)
- /// can be applied to the position increment, or any adjustment of other
- /// attributes where the end-of-stream value may be important.
- ///
+ ///
+ ///
+ ///
/// NOTE:
/// The default implementation chains the call to the input TokenStream, so
/// be sure to call base.End() first when overriding this method.
- ///
+ ///
/// If an I/O error occurs
public override void End()
{
@@ -66,35 +56,16 @@ public override void End()
}
///
- /// Releases resources associated with this stream.
- ///
- /// If you override this method, always call base.Dispose(disposing), otherwise
- /// some internal state will not be correctly reset (e.g., will
- /// throw on reuse).
- ///
- /// NOTE:
- /// The default implementation chains the call to the input TokenStream, so
- /// be sure to call base.Dispose(disposing) when overriding this method.
+ ///
///
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- if (disposing)
- {
- m_input.Dispose();
- }
- base.Dispose(disposing); // LUCENENET specific - disposable pattern requires calling the base class implementation
+ m_input.Close();
+ base.Close();
}
///
- /// This method is called by a consumer before it begins consumption using
- /// .
- ///
- /// Resets this stream to a clean state. Stateful implementations must implement
- /// this method so that they can be reused, just as if they had been created fresh.
- ///
- /// If you override this method, always call base.Reset(), otherwise
- /// some internal state will not be correctly reset (e.g., will
- /// throw on further usage).
+ ///
///
///
/// NOTE:
@@ -106,4 +77,4 @@ public override void Reset()
m_input.Reset();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Analysis/TokenStream.cs b/src/Lucene.Net/Analysis/TokenStream.cs
index 0d23c42059..07f891aa60 100644
--- a/src/Lucene.Net/Analysis/TokenStream.cs
+++ b/src/Lucene.Net/Analysis/TokenStream.cs
@@ -1,4 +1,6 @@
using Lucene.Net.Analysis.TokenAttributes;
+using Lucene.Net.Index;
+using Lucene.Net.Util;
using System;
using System.IO;
@@ -36,13 +38,13 @@ namespace Lucene.Net.Analysis
/// A new API has been introduced with Lucene 2.9. This API
/// has moved from being Token-based to Attribute-based. While
/// Token still exists in 2.9 as a convenience class, the preferred way
- /// to store the information of a Token is to use Attributes.
+ /// to store the information of a Token is to use Attributes.
///
/// TokenStream now extends AttributeSource, which provides
/// access to all of the token Attributes for the TokenStream.
- /// Note that only one instance per Attribute is created and reused
+ /// Note that only one instance per Attribute is created and reused
/// for every token. This approach reduces object creation and allows local
- /// caching of references to the Attributes. See IncrementToken()
+ /// caching of references to the Attributes. See IncrementToken()
/// for further details.
///
/// The workflow of the new API is as follows:
@@ -56,7 +58,7 @@ namespace Lucene.Net.Analysis
/// consuming the attributes after each call.
/// - The consumer calls End() so that any end-of-stream operations
/// can be performed.
- /// - The consumer calls Dispose() to release any resource when finished
+ /// - The consumer calls Close() to release any resource when finished
/// using the TokenStream.
///
/// To make sure that filters and consumers know which attributes are available,
@@ -64,7 +66,7 @@ namespace Lucene.Net.Analysis
/// not required to check for availability of attributes in
/// .
///
- /// You can find some example code for the new API in the analysis
+ /// You can find some example code for the new API in the analysis
/// documentation.
///
/// Sometimes it is desirable to capture a current state of a TokenStream,
@@ -76,7 +78,7 @@ namespace Lucene.Net.Analysis
/// Therefore all non-abstract subclasses must be sealed or have at least a sealed
/// implementation of IncrementToken()! This is checked when assertions are enabled.
///
- public abstract class TokenStream : AttributeSource, IDisposable
+ public abstract class TokenStream : AttributeSource, ICloseable, IDisposable
{
///
/// A TokenStream using the default attribute factory.
@@ -98,7 +100,7 @@ protected TokenStream(AttributeSource input)
}
///
- /// A using the supplied
+ /// A using the supplied
/// for creating new instances.
///
protected TokenStream(AttributeFactory factory)
@@ -109,7 +111,7 @@ protected TokenStream(AttributeFactory factory)
}
///
- /// Consumers (i.e., IndexWriter) use this method to advance the stream to
+ /// Consumers (i.e., IndexWriter) use this method to advance the stream to
/// the next token. Implementing classes must implement this method and update
/// the appropriate s with the attributes of the next
/// token.
@@ -177,6 +179,21 @@ public virtual void Reset()
{
}
+ ///
+ /// Releases resources associated with this stream.
+ ///
+ /// If you override this method, always call base.Close(), otherwise
+ /// some internal state will not be correctly reset (e.g., Tokenizer will
+ /// throw on reuse).
+ ///
+ ///
+ /// LUCENENET notes - this is intended to release resources in a way that allows the
+ /// object to be reused, so it is not the same as Dispose().
+ ///
+ public virtual void Close()
+ {
+ }
+
// LUCENENET specific - implementing proper dispose pattern
public void Dispose()
{
@@ -185,14 +202,14 @@ public void Dispose()
}
///
- /// Releases resources associated with this stream.
+ /// Releases resources associated with this stream, in a way such that the stream is not reusable.
///
- /// If you override this method, always call base.Dispose(disposing), otherwise
- /// some internal state will not be correctly reset (e.g., will
- /// throw on reuse).
+ /// If you override this method, always call base.Dispose(disposing).
+ /// Also, ensure that your implementation is idempotent as it may be called multiple times.
///
+ ///
protected virtual void Dispose(bool disposing)
{
}
}
-}
\ No newline at end of file
+}
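The net effect of the split contract: Close() is the per-use cleanup that keeps a stream reusable, while Dispose() remains the one-time teardown. A hedged consumer-side sketch (docs is an illustrative string[]):

    foreach (string doc in docs)
    {
        TokenStream ts = analyzer.GetTokenStream("field", doc);
        try
        {
            ts.Reset();
            while (ts.IncrementToken()) { /* read attributes */ }
            ts.End();
        }
        finally
        {
            ts.Close();  // releases per-use resources; stream can be reused
        }
    }
    analyzer.Dispose();  // final teardown once analysis is completely finished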
diff --git a/src/Lucene.Net/Analysis/Tokenizer.cs b/src/Lucene.Net/Analysis/Tokenizer.cs
index 4fa95e0172..587ea9a82d 100644
--- a/src/Lucene.Net/Analysis/Tokenizer.cs
+++ b/src/Lucene.Net/Analysis/Tokenizer.cs
@@ -33,11 +33,13 @@ namespace Lucene.Net.Analysis
public abstract class Tokenizer : TokenStream
{
///
- /// The text source for this Tokenizer.
+ /// The text source for this Tokenizer.
+ ///
protected TextReader m_input = ILLEGAL_STATE_READER;
///
- /// Pending reader: not actually assigned to input until Reset().
+ /// Pending reader: not actually assigned to input until Reset().
+ ///
private TextReader inputPending = ILLEGAL_STATE_READER;
///
@@ -57,29 +59,16 @@ protected Tokenizer(AttributeFactory factory, TextReader input)
}
///
- /// Releases resources associated with this stream.
- ///
- /// If you override this method, always call base.Dispose(disposing), otherwise
- /// some internal state will not be correctly reset (e.g., will
- /// throw on reuse).
+ ///
///
- ///
- /// NOTE:
- /// The default implementation closes the input , so
- /// be sure to call base.Dispose(disposing) when overriding this method.
- ///
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- if (disposing)
- {
- m_input.Dispose();
- inputPending.Dispose(); // LUCENENET specific: call dispose on input pending
- // LUCENE-2387: don't hold onto TextReader after close, so
- // GC can reclaim
- inputPending = ILLEGAL_STATE_READER;
- m_input = ILLEGAL_STATE_READER;
- }
- base.Dispose(disposing); // LUCENENET specific - disposable pattern requires calling the base class implementation
+ inputPending.Dispose(); // LUCENENET specific: call dispose on input pending
+ m_input.Dispose();
+ // LUCENE-2387: don't hold onto Reader after close, so
+ // GC can reclaim
+ inputPending = m_input = ILLEGAL_STATE_READER;
+ base.Close();
}
///
@@ -102,7 +91,7 @@ public void SetReader(TextReader input)
{
if (input is null)
{
- throw new ArgumentNullException(nameof(input), "input must not be null"); // LUCENENET specific - changed from IllegalArgumentException to ArgumentOutOfRangeException (.NET convention)
+ throw new ArgumentNullException(nameof(input), "input must not be null"); // LUCENENET specific - changed from IllegalArgumentException to ArgumentNullException (.NET convention)
}
else if (this.m_input != ILLEGAL_STATE_READER)
{
@@ -131,7 +120,7 @@ private sealed class ReaderAnonymousClass : TextReader
{
public override int Read(char[] cbuf, int off, int len)
{
- throw IllegalStateException.Create("TokenStream contract violation: Reset()/Dispose() call missing, "
+ throw IllegalStateException.Create("TokenStream contract violation: Reset()/Dispose() call missing, "
+ "Reset() called multiple times, or subclass does not call base.Reset(). "
+ "Please see the documentation of TokenStream class for more information about the correct consuming workflow.");
}
@@ -142,4 +131,4 @@ protected override void Dispose(bool disposing)
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Document/Field.cs b/src/Lucene.Net/Document/Field.cs
index 5b7532ee0a..94a932c14c 100644
--- a/src/Lucene.Net/Document/Field.cs
+++ b/src/Lucene.Net/Document/Field.cs
@@ -35,8 +35,8 @@ namespace Lucene.Net.Documents
///
/// Expert: directly create a field for a document. Most
- /// users should use one of the sugar subclasses: ,
- /// , , ,
+ /// users should use one of the sugar subclasses: ,
+ /// , , ,
/// , ,
/// , ,
/// , .
@@ -44,7 +44,7 @@ namespace Lucene.Net.Documents
/// A field is a section of a . Each field has three
/// parts: name, type and value. Values may be text
/// (, or pre-analyzed ), binary
- /// (), or numeric (, , , or ).
+ /// (), or numeric (, , , or ).
/// Fields are optionally stored in the
/// index, so that they may be returned with hits on the document.
///
@@ -72,7 +72,7 @@ public partial class Field : IIndexableField, IFormattable
private object fieldsData;
///
- /// Field's value
+ /// Field's value
///
/// Setting this property will automatically set the backing field for the
/// property.
@@ -320,7 +320,7 @@ public virtual string GetStringValue() // LUCENENET specific: Added verb Get to
/// An object that supplies culture-specific formatting information. This parameter has no effect if this field is non-numeric.
/// The string representation of the value if it is either a or numeric type.
// LUCENENET specific overload.
- public virtual string GetStringValue(IFormatProvider provider)
+ public virtual string GetStringValue(IFormatProvider provider)
{
return GetStringValue(null, provider);
}
@@ -333,7 +333,7 @@ public virtual string GetStringValue(IFormatProvider provider)
/// A standard or custom numeric format string. This parameter has no effect if this field is non-numeric.
/// The string representation of the value if it is either a or numeric type.
// LUCENENET specific overload.
- public virtual string GetStringValue(string format)
+ public virtual string GetStringValue(string format)
{
return GetStringValue(format, null);
}
@@ -411,7 +411,7 @@ public virtual void SetStringValue(string value)
}
///
- /// Expert: change the value of this field. See
+ /// Expert: change the value of this field. See
/// .
///
public virtual void SetReaderValue(TextReader value)
@@ -532,7 +532,7 @@ public virtual void SetDoubleValue(double value)
// LUCENENET TODO: Add SetValue() overloads for each type?
// Upside: Simpler API.
- // Downside: Must be vigilant about what type is passed or the wrong overload will be called and will get a runtime exception.
+ // Downside: Must be vigilant about what type is passed or the wrong overload will be called and will get a runtime exception.
///
/// Expert: sets the token stream to be used for indexing and causes
@@ -602,15 +602,15 @@ public virtual object GetNumericValue() // LUCENENET specific: Added verb Get to
///
/// Gets the of the underlying value, or if the value is not set or non-numeric.
///
- /// Expert: The difference between this property and is
+ /// Expert: The difference between this property and is
/// this is represents the current state of the field (whether being written or read) and the
/// property represents instructions on how the field will be written,
/// but does not re-populate when reading back from an index (it is write-only).
///
- /// In Java, the numeric type was determined by checking the type of
+ /// In Java, the numeric type was determined by checking the type of
/// . However, since there are no reference number
/// types in .NET, using so will cause boxing/unboxing. It is
- /// therefore recommended to use this property to check the underlying type and the corresponding
+ /// therefore recommended to use this property to check the underlying type and the corresponding
/// Get*Value() method to retrieve the value.
///
/// NOTE: Since Lucene codecs do not support or ,
@@ -770,7 +770,7 @@ public override string ToString()
}
///
- /// Prints a Field for human consumption.
+ /// Prints a Field for human consumption.
///
/// A standard or custom numeric format string. This parameter has no effect if this field is non-numeric.
// LUCENENET specific - method added for better .NET compatibility
@@ -954,12 +954,9 @@ public override void Reset()
used = false;
}
- protected override void Dispose(bool disposing)
+ public override void Close()
{
- if (disposing)
- {
- value = null;
- }
+ value = null;
}
}
@@ -985,7 +982,7 @@ public enum Store
//
///
- /// Specifies whether and how a field should be indexed.
+ /// Specifies whether and how a field should be indexed.
///
[Obsolete("This is here only to ease transition from the pre-4.0 APIs.")]
public enum Index
@@ -1035,13 +1032,13 @@ public enum Index
}
///
- /// Specifies whether and how a field should have term vectors.
+ /// Specifies whether and how a field should have term vectors.
///
[Obsolete("This is here only to ease transition from the pre-4.0 APIs.")]
public enum TermVector
{
///
- /// Do not store term vectors.
+ /// Do not store term vectors.
///
NO,
@@ -1197,7 +1194,7 @@ public Field(string name, TextReader reader)
}
///
- /// Create a tokenized and indexed field that is not stored, optionally with
+ /// Create a tokenized and indexed field that is not stored, optionally with
/// storing term vectors. The TextReader is read only when the Document is added to the index,
/// i.e. you may not close the TextReader until IndexWriter.AddDocument()
/// has been called.
@@ -1229,7 +1226,7 @@ public Field(string name, TokenStream tokenStream)
}
///
- /// Create a tokenized and indexed field that is not stored, optionally with
+ /// Create a tokenized and indexed field that is not stored, optionally with
/// storing term vectors. This is useful for pre-analyzed fields.
/// The TokenStream is read only when the Document is added to the index,
/// i.e. you may not close the TokenStream until IndexWriter.AddDocument()
/// has been called.
@@ -1412,7 +1409,7 @@ public static Field.Index ToIndex(bool indexed, bool analyzed)
}
[Obsolete("This is here only to ease transition from the pre-4.0 APIs.")]
- public static Field.Index ToIndex(bool indexed, bool analyzed, bool omitNorms)
+ public static Field.Index ToIndex(bool indexed, bool analyzed, bool omitNorms)
{
// If it is not indexed nothing else matters
if (!indexed)
@@ -1473,8 +1470,8 @@ public static Field.TermVector ToTermVector(bool stored, bool withOffsets, bool
// LUCENENET specific
// Since we have more numeric types on Field than on FieldType,
// a new enumeration was created for .NET. In Java, this type was
- // determined by checking the data type of the Field.numericValue()
- // method. However, since the corresponding GetNumericValue() method
+ // determined by checking the data type of the Field.numericValue()
+ // method. However, since the corresponding GetNumericValue() method
// in .NET returns type object (which would result in boxing/unboxing),
// this has been refactored to use an enumeration instead, which makes the
// API easier to use.
@@ -1511,8 +1508,8 @@ public enum NumericFieldType
SINGLE,
///
- /// 64-bit double numeric type
+ /// 64-bit double numeric type
///
DOUBLE
}
-}
\ No newline at end of file
+}
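
To ground the NumericFieldType notes above, here is a minimal sketch (not part of the patch) of reading an integral field without boxing. The Field.NumericType property and the GetInt32Value()/GetInt64Value() accessors follow the Get*Value() pattern the comments describe, but treat the exact member names as assumptions.

using Lucene.Net.Documents;

public static class FieldValueReader
{
    // Check the LUCENENET-specific enumeration first, then call the
    // matching typed accessor, instead of type-testing the boxed result
    // of GetNumericValue().
    public static long? ReadAsInt64(Field field)
    {
        switch (field.NumericType) // assumed property name
        {
            case NumericFieldType.INT32: // qualify as Field.NumericFieldType if nested
                return field.GetInt32Value(); // int? widens implicitly to long?
            case NumericFieldType.INT64:
                return field.GetInt64Value();
            default:
                return null; // unset, or not an integral type
        }
    }
}
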
diff --git a/src/Lucene.Net/Index/DocInverterPerField.cs b/src/Lucene.Net/Index/DocInverterPerField.cs
index 97e91f3bbe..2722d11627 100644
--- a/src/Lucene.Net/Index/DocInverterPerField.cs
+++ b/src/Lucene.Net/Index/DocInverterPerField.cs
@@ -223,11 +223,11 @@ public override void ProcessFields(IIndexableField[] fields, int count)
{
if (!succeededInProcessingField)
{
- IOUtils.DisposeWhileHandlingException(stream);
+ IOUtils.CloseWhileHandlingException(stream);
}
else
{
- stream.Dispose();
+ stream.Close();
}
if (!succeededInProcessingField && docState.infoStream.IsEnabled("DW"))
{
@@ -250,4 +250,4 @@ public override void ProcessFields(IIndexableField[] fields, int count)
internal override FieldInfo FieldInfo => fieldInfo;
}
-}
\ No newline at end of file
+}
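
The hunk above encodes a common close-on-failure idiom; the sketch below (editor-added, with a hypothetical consumer loop) shows the same shape in isolation: on failure, close-time exceptions are swallowed so they cannot mask the original error, while on success Close() may throw normally. It assumes TokenStream implements ICloseable, as this patch implies.

using Lucene.Net.Analysis;
using Lucene.Net.Util;

public static class InvertExample
{
    public static void ConsumeAndClose(TokenStream stream)
    {
        bool succeeded = false;
        try
        {
            stream.Reset();
            while (stream.IncrementToken())
            {
                // ... hand each token to the consumer (omitted) ...
            }
            stream.End();
            succeeded = true;
        }
        finally
        {
            if (!succeeded)
            {
                // Suppress secondary errors so the original exception wins.
                IOUtils.CloseWhileHandlingException(stream);
            }
            else
            {
                stream.Close(); // propagate close-time failures normally
            }
        }
    }
}
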
diff --git a/src/Lucene.Net/Support/Util/ICloseable.cs b/src/Lucene.Net/Support/Util/ICloseable.cs
new file mode 100644
index 0000000000..d07626bbc9
--- /dev/null
+++ b/src/Lucene.Net/Support/Util/ICloseable.cs
@@ -0,0 +1,35 @@
+namespace Lucene.Net.Util
+{
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ ///
+ /// Represents a source or destination of data that can be closed.
+ ///
+ ///
+ /// LUCENENET specific - this interface is to be used when a class
+ /// is designed to be reusable after being closed, unlike IDisposable,
+ /// where the instance is no longer usable after being disposed.
+ ///
+ public interface ICloseable
+ {
+ ///
+ /// Closes this object in a way that allows it to be reused.
+ ///
+ void Close();
+ }
+}
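
A minimal sketch of the contract this interface states, assuming only what the remarks say: after Close(), the instance remains usable. ReusableBuffer is a hypothetical example type.

using System.Text;
using Lucene.Net.Util;

// Hypothetical type: Close() resets state rather than ending the
// object's lifetime, so the same instance can be reused.
public sealed class ReusableBuffer : ICloseable
{
    private readonly StringBuilder sb = new StringBuilder();

    public void Append(string text) => sb.Append(text);

    public string Drain() => sb.ToString();

    public void Close() => sb.Clear(); // reusable afterwards, unlike Dispose()
}

// Usage: the same instance survives multiple cycles.
//   var buf = new ReusableBuffer();
//   buf.Append("a"); buf.Close();
//   buf.Append("b"); // still legal after Close()
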
diff --git a/src/Lucene.Net/Util/CloseableThreadLocal.cs b/src/Lucene.Net/Util/CloseableThreadLocal.cs
index c36b51ac11..004e019958 100644
--- a/src/Lucene.Net/Util/CloseableThreadLocal.cs
+++ b/src/Lucene.Net/Util/CloseableThreadLocal.cs
@@ -39,7 +39,7 @@ namespace Lucene.Net.Util
///
/// This class works around the issue by using an alternative approach to ThreadLocal&lt;T&gt;.
/// It keeps track of each thread's local and global state in order to later optimize garbage collection.
- /// A complete explanation can be found at
+ /// A complete explanation can be found at
///
/// https://ayende.com/blog/189793-A/the-design-and-implementation-of-a-better-threadlocal-t.
///
@@ -169,6 +169,21 @@ public void Dispose()
if (copy is null)
return;
+ foreach (var value in copy.Values)
+ {
+ if (value is IDisposable disposable)
+ {
+ try
+ {
+ disposable.Dispose();
+ }
+ catch
+ {
+ // ignored
+ }
+ }
+ }
+
Interlocked.Increment(ref globalVersion);
_disposed = true;
_values = null;
@@ -298,4 +313,4 @@ private sealed class LocalState
public int localVersion;
}
}
-}
\ No newline at end of file
+}
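
The added loop means per-thread values that implement IDisposable are now cleaned up eagerly when the thread-local itself is disposed, instead of waiting for garbage collection. A sketch, assuming the public type is DisposableThreadLocal<T> with a Func<T> factory and a Value property (these names are assumptions, not confirmed by this hunk):

using System;
using Lucene.Net.Util;

public sealed class TrackedResource : IDisposable
{
    public bool IsDisposed { get; private set; }
    public void Dispose() => IsDisposed = true;
}

public static class ThreadLocalDemo
{
    public static void Run()
    {
        var local = new DisposableThreadLocal<TrackedResource>(
            () => new TrackedResource()); // assumed ctor taking Func<T>
        TrackedResource mine = local.Value; // assumed Value property

        local.Dispose(); // per the added loop, also disposes stored values

        Console.WriteLine(mine.IsDisposed); // expected: True
    }
}
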
diff --git a/src/Lucene.Net/Util/IOUtils.cs b/src/Lucene.Net/Util/IOUtils.cs
index dc17cdbdd9..cd2057b6ea 100644
--- a/src/Lucene.Net/Util/IOUtils.cs
+++ b/src/Lucene.Net/Util/IOUtils.cs
@@ -58,8 +58,66 @@ public static class IOUtils // LUCENENET specific - made static
public static readonly string UTF_8 = "UTF-8";
///
- /// Disposes all given IDisposables, suppressing all thrown exceptions. Some of the IDisposables
- /// may be null, they are ignored. After everything is disposed, method either throws ,
+ /// Closes all given ICloseables. Some of the
+ /// ICloseables may be null; they are
+ /// ignored. After everything is closed, the method either
+ /// throws the first exception it hit while closing, or
+ /// completes normally if there were no exceptions.
+ ///
+ /// Objects to call Close() on
+ public static void Close(params ICloseable[] objects)
+ {
+ Exception th = null;
+
+ foreach (ICloseable @object in objects)
+ {
+ try
+ {
+ @object?.Close();
+ }
+ catch (Exception t) when (t.IsThrowable())
+ {
+ AddSuppressed(th, t);
+ if (th is null)
+ {
+ th = t;
+ }
+ }
+ }
+
+ ReThrow(th);
+ }
+
+ ///
+ /// Closes all given ICloseables.
+ ///
+ /// Objects to call Close() on
+ public static void Close(IEnumerable<ICloseable> objects)
+ {
+ Exception th = null;
+
+ foreach (ICloseable @object in objects)
+ {
+ try
+ {
+ @object?.Close();
+ }
+ catch (Exception t) when (t.IsThrowable())
+ {
+ AddSuppressed(th, t);
+ if (th is null)
+ {
+ th = t;
+ }
+ }
+ }
+
+ ReThrow(th);
+ }
+
+ ///
+ /// Closes all given ICloseables, suppressing all thrown exceptions. Some of the ICloseables
+ /// may be null; they are ignored. After everything is closed, the method either throws priorException,
/// if one is supplied, or the first of suppressed exceptions, or completes normally.
/// Sample usage:
///
@@ -82,66 +140,107 @@ public static class IOUtils // LUCENENET specific - made static
///
///
/// null or an exception that will be rethrown after method completion.
- /// Objects to call Dispose() on.
- [Obsolete("Use DisposeWhileHandlingException(Exception, params IDisposable[]) instead.")]
- public static void CloseWhileHandlingException(Exception priorException, params IDisposable[] objects)
+ /// Objects to call Close() on.
+ public static void CloseWhileHandlingException(Exception priorException, params ICloseable[] objects)
{
- DisposeWhileHandlingException(priorException, objects);
- }
+ Exception th = null;
- ///
- /// Disposes all given IDisposables, suppressing all thrown exceptions.
- ///
- [Obsolete("Use DisposeWhileHandlingException(Exception, IEnumerable<IDisposable>) instead.")]
- public static void CloseWhileHandlingException(Exception priorException, IEnumerable<IDisposable> objects)
- {
- DisposeWhileHandlingException(priorException, objects);
+ foreach (ICloseable @object in objects)
+ {
+ try
+ {
+ @object?.Close();
+ }
+ catch (Exception t) when (t.IsThrowable())
+ {
+ AddSuppressed(priorException ?? th, t);
+ if (th is null)
+ {
+ th = t;
+ }
+ }
+ }
+
+ if (priorException != null)
+ {
+ ExceptionDispatchInfo.Capture(priorException).Throw(); // LUCENENET: Rethrow to preserve stack details from the original throw
+ }
+ else
+ {
+ ReThrow(th);
+ }
}
///
- /// Disposes all given IDisposables. Some of the
- /// IDisposables may be null; they are
- /// ignored. After everything is closed, the method either
- /// throws the first exception it hit while closing, or
- /// completes normally if there were no exceptions.
+ /// Closes all given ICloseables, suppressing all thrown exceptions.
///
- ///
- /// Objects to call Dispose() on
- [Obsolete("Use Dispose(params IDisposable[]) instead.")]
- public static void Close(params IDisposable[] objects)
+ ///
+ public static void CloseWhileHandlingException(Exception priorException, IEnumerable<ICloseable> objects)
{
- Dispose(objects);
- }
+ Exception th = null;
- ///
- /// Disposes all given IDisposables.
- ///
- [Obsolete("Use Dispose(IEnumerable<IDisposable>) instead.")]
- public static void Close(IEnumerable<IDisposable> objects)
- {
- Dispose(objects);
+ foreach (ICloseable @object in objects)
+ {
+ try
+ {
+ @object?.Close();
+ }
+ catch (Exception t) when (t.IsThrowable())
+ {
+ AddSuppressed(priorException ?? th, t);
+ if (th is null)
+ {
+ th = t;
+ }
+ }
+ }
+
+ if (priorException != null)
+ {
+ ExceptionDispatchInfo.Capture(priorException).Throw(); // LUCENENET: Rethrow to preserve stack details from the original throw
+ }
+ else
+ {
+ ReThrow(th);
+ }
}
///
- /// Disposes all given IDisposables, suppressing all thrown exceptions.
- /// Some of the IDisposables may be null, they are ignored.
+ /// Closes all given ICloseables, suppressing all thrown exceptions.
+ /// Some of the ICloseables may be null, they are ignored.
///
- ///
- /// Objects to call Dispose() on
- [Obsolete("Use DisposeWhileHandlingException(params IDisposable[]) instead.")]
- public static void CloseWhileHandlingException(params IDisposable[] objects)
+ /// Objects to call Close() on
+ public static void CloseWhileHandlingException(params ICloseable[] objects)
{
- DisposeWhileHandlingException(objects);
+ foreach (ICloseable @object in objects)
+ {
+ try
+ {
+ @object?.Close();
+ }
+ catch (Exception t) when (t.IsThrowable())
+ {
+ }
+ }
}
///
- /// Disposes all given IDisposables, suppressing all thrown exceptions.
- ///
- ///
- [Obsolete("Use DisposeWhileHandlingException(IEnumerable<IDisposable>) instead.")]
- public static void CloseWhileHandlingException(IEnumerable<IDisposable> objects)
+ /// Closes all given ICloseables, suppressing all thrown exceptions.
+ ///
+ ///
+ ///
+ public static void CloseWhileHandlingException(IEnumerable<ICloseable> objects)
{
- DisposeWhileHandlingException(objects);
+ foreach (ICloseable @object in objects)
+ {
+ try
+ {
+ @object?.Close();
+ }
+ catch (Exception t) when (t.IsThrowable())
+ {
+ }
+ }
}
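
The "Sample usage" referenced in the summary above lives in unchanged context outside the hunk; the following is a hedged reconstruction of that acquire/use/close pattern in C#. NoopResource and the work inside the try block are placeholders; only IOUtils.CloseWhileHandlingException(Exception, params ICloseable[]) comes from the patch.

using System;
using Lucene.Net.Util;

public static class CloseExample
{
    // Placeholder resource; any ICloseable works here.
    private sealed class NoopResource : ICloseable
    {
        public void Close() { /* release or reset state */ }
    }

    public static void Run()
    {
        ICloseable resource1 = null, resource2 = null;
        Exception priorE = null;
        try
        {
            resource1 = new NoopResource(); // acquisition may throw
            resource2 = new NoopResource();
            // ... work that may throw ...
        }
        catch (Exception e)
        {
            priorE = e; // remember the primary failure
        }
        finally
        {
            // Nulls are ignored. If priorE is non-null it is rethrown with
            // its original stack; close-time exceptions are suppressed onto it.
            IOUtils.CloseWhileHandlingException(priorE, resource1, resource2);
        }
    }
}
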
diff --git a/src/Lucene.Net/Util/QueryBuilder.cs b/src/Lucene.Net/Util/QueryBuilder.cs
index 05ba3a0b04..0e1c5b69ed 100644
--- a/src/Lucene.Net/Util/QueryBuilder.cs
+++ b/src/Lucene.Net/Util/QueryBuilder.cs
@@ -250,7 +250,7 @@ protected Query CreateFieldQuery(Analyzer analyzer, Occur @operator, string fiel
}
finally
{
- IOUtils.DisposeWhileHandlingException(source);
+ IOUtils.CloseWhileHandlingException(source);
}
// rewind the buffer stream
@@ -439,7 +439,7 @@ protected Query CreateFieldQuery(Analyzer analyzer, Occur @operator, string fiel
///
/// Builds a new BooleanQuery instance.
///
- /// This is intended for subclasses that wish to customize the generated queries.
+ /// This is intended for subclasses that wish to customize the generated queries.
///
/// Disable coord.
/// New BooleanQuery instance.
@@ -452,7 +452,7 @@ protected virtual BooleanQuery NewBooleanQuery(bool disableCoord)
///
/// Builds a new TermQuery instance.
///
- /// This is intended for subclasses that wish to customize the generated queries.
+ /// This is intended for subclasses that wish to customize the generated queries.
///
/// Term.
/// New TermQuery instance.
@@ -465,7 +465,7 @@ protected virtual Query NewTermQuery(Term term)
///
/// Builds a new PhraseQuery instance.
///
- /// This is intended for subclasses that wish to customize the generated queries.
+ /// This is intended for subclasses that wish to customize the generated queries.
///
/// New PhraseQuery instance.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
@@ -477,7 +477,7 @@ protected virtual PhraseQuery NewPhraseQuery()
///
/// Builds a new MultiPhraseQuery instance.
///
- /// This is intended for subclasses that wish to customize the generated queries.
+ /// This is intended for subclasses that wish to customize the generated queries.
///
/// New MultiPhraseQuery instance.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
@@ -486,4 +486,4 @@ protected virtual MultiPhraseQuery NewMultiPhraseQuery()
return new MultiPhraseQuery();
}
}
-}
\ No newline at end of file
+}
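
To illustrate the customization point the New*Query comments describe, here is a sketch of a QueryBuilder subclass. BoostedTermQueryBuilder and its boost policy are hypothetical; the override signature matches the one shown in the hunk above, and the Analyzer-taking constructor is an assumption.

using Lucene.Net.Analysis;
using Lucene.Net.Index;
using Lucene.Net.Search;
using Lucene.Net.Util;

public class BoostedTermQueryBuilder : QueryBuilder
{
    public BoostedTermQueryBuilder(Analyzer analyzer)
        : base(analyzer) // assumed ctor; QueryBuilder wraps an Analyzer
    {
    }

    // Every term query the builder generates now carries a fixed boost.
    protected override Query NewTermQuery(Term term)
    {
        Query q = base.NewTermQuery(term);
        q.Boost = 2.0f; // hypothetical boost policy
        return q;
    }
}
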