package org.apache.lucene.index;
import java.io.IOException;
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.BytesRefHash.BytesStartArray;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.IntBlockPool;
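/**
 * Stores streams of information per term without knowing the size of the stream ahead of time.
 * Each stream typically encodes one level of information, such as term frequency per document or
 * term proximity. Internally this class allocates a linked list of byte slices per stream that can
 * later be read back with a {@link ByteSliceReader}. Terms are first deduplicated in a
 * {@link BytesRefHash}; the internal data structures then point at the current write offset of
 * each stream for each term.
 */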
abstract class TermsHashPerField implements Comparable<TermsHashPerField> {
private static final int HASH_INIT_SIZE = 4;
private final TermsHashPerField nextPerField;
private final IntBlockPool intPool;
final ByteBlockPool bytePool;
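  // For each term we store one integer per stream that points into bytePool; the address is
  // updated as data is written so it always points at the next free offset in that term's stream.
  // The start address of each term's streams is stored in postingsArray.byteStarts[termID].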
private int[] termStreamAddressBuffer;
private int streamAddressOffset;
private final int streamCount;
private final String fieldName;
final IndexOptions indexOptions;
private final BytesRefHash bytesHash;
ParallelPostingsArray postingsArray;
private int lastDocID; // only updated when assertions are enabled (see assertDocId)
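  /**
   * streamCount: how many streams this field stores per term, e.g. doc(+freq) is one stream,
   * prox+offsets is a second.
   */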
TermsHashPerField(int streamCount, IntBlockPool intPool, ByteBlockPool bytePool, ByteBlockPool termBytePool,
Counter bytesUsed, TermsHashPerField nextPerField, String fieldName, IndexOptions indexOptions) {
this.intPool = intPool;
this.bytePool = bytePool;
this.streamCount = streamCount;
this.fieldName = fieldName;
this.nextPerField = nextPerField;
assert indexOptions != IndexOptions.NONE;
this.indexOptions = indexOptions;
PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
}
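  /** Clears all buffered terms for this field (and any chained field) so it can be reused. */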
void reset() {
bytesHash.clear(false);
sortedTermIDs = null;
if (nextPerField != null) {
nextPerField.reset();
}
}
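  /** Initializes the given {@link ByteSliceReader} to read the given stream of the given term. */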
final void initReader(ByteSliceReader reader, int termID, int stream) {
assert stream < streamCount;
int streamStartOffset = postingsArray.addressOffset[termID];
final int[] streamAddressBuffer = intPool.buffers[streamStartOffset >> IntBlockPool.INT_BLOCK_SHIFT];
final int offsetInAddressBuffer = streamStartOffset & IntBlockPool.INT_BLOCK_MASK;
reader.init(bytePool,
postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
streamAddressBuffer[offsetInAddressBuffer+stream]);
}
private int[] sortedTermIDs;
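  /** Collapse the hash table and sort in-place; also sets this.sortedTermIDs to the results. */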
final void sortTerms() {
assert sortedTermIDs == null;
sortedTermIDs = bytesHash.sort();
}
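  /** Returns the sorted term IDs; {@link #sortTerms()} must be called before. */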
final int[] getSortedTermIDs() {
assert sortedTermIDs != null;
return sortedTermIDs;
}
final void reinitHash() {
sortedTermIDs = null;
bytesHash.reinit();
}
private boolean doNextCall;
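  // Secondary entry point (for 2nd & subsequent TermsHash), because token text has already been
  // "interned" into textStart, so we hash by textStart. Term vectors use this API.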
private void add(int textStart, final int docID) throws IOException {
int termID = bytesHash.addByPoolOffset(textStart);
if (termID >= 0) {
initStreamSlices(termID, docID);
} else {
positionStreamSlice(termID, docID);
}
}
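  /**
   * Allocates the per-stream address slots and a first byte slice for each stream of a newly seen
   * term, then notifies the subclass via {@link #newTerm}.
   */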
private void initStreamSlices(int termID, int docID) throws IOException {
    // can we fit the address slots for all streams in the current int buffer? if not, advance
    if ((2 * streamCount) + intPool.intUpto > IntBlockPool.INT_BLOCK_SIZE) {
      intPool.nextBuffer();
    }
    // can we fit a first-level slice per stream in the current byte buffer? if not, advance
    if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < (2 * streamCount) * ByteBlockPool.FIRST_LEVEL_SIZE) {
      bytePool.nextBuffer();
    }
    termStreamAddressBuffer = intPool.buffer;
    streamAddressOffset = intPool.intUpto;
    intPool.intUpto += streamCount; // advance the pool to reserve the N streams for this term
    postingsArray.addressOffset[termID] = streamAddressOffset + intPool.intOffset;
    for (int i = 0; i < streamCount; i++) {
      // start each stream with a first-level slice; it grows as more space is needed
      // (see ByteBlockPool.LEVEL_SIZE_ARRAY)
      final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
      termStreamAddressBuffer[streamAddressOffset + i] = upto + bytePool.byteOffset;
    }
    postingsArray.byteStarts[termID] = termStreamAddressBuffer[streamAddressOffset];
newTerm(termID, docID);
}
private boolean assertDocId(int docId) {
assert docId >= lastDocID : "docID must be >= " + lastDocID + " but was: " + docId;
lastDocID = docId;
return true;
}
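  /** Called once per inverted token. This is the primary entry point (for the first TermsHash). */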
void add(BytesRef termBytes, final int docID) throws IOException {
    assert assertDocId(docID);
    // We are first in the chain, so we must "intern" the term bytes into the hash
    int termID = bytesHash.add(termBytes);
    if (termID >= 0) { // New posting
initStreamSlices(termID, docID);
} else {
termID = positionStreamSlice(termID, docID);
}
if (doNextCall) {
nextPerField.add(postingsArray.textStarts[termID], docID);
}
}
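  /**
   * Re-locates the stream address slots of a previously seen term (encoded as a negative termID by
   * the hash) and notifies the subclass via {@link #addTerm}; returns the decoded termID.
   */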
private int positionStreamSlice(int termID, final int docID) throws IOException {
termID = (-termID) - 1;
int intStart = postingsArray.addressOffset[termID];
termStreamAddressBuffer = intPool.buffers[intStart >> IntBlockPool.INT_BLOCK_SHIFT];
streamAddressOffset = intStart & IntBlockPool.INT_BLOCK_MASK;
addTerm(termID, docID);
return termID;
}
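  /** Appends one byte to the given stream, allocating a new slice when the current one is full. */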
final void writeByte(int stream, byte b) {
int streamAddress = streamAddressOffset + stream;
int upto = termStreamAddressBuffer[streamAddress];
byte[] bytes = bytePool.buffers[upto >> ByteBlockPool.BYTE_BLOCK_SHIFT];
assert bytes != null;
int offset = upto & ByteBlockPool.BYTE_BLOCK_MASK;
    if (bytes[offset] != 0) {
      // End of slice; allocate a new (larger) one
      offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
termStreamAddressBuffer[streamAddress] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
(termStreamAddressBuffer[streamAddress])++;
}
final void writeBytes(int stream, byte[] b, int offset, int len) {
final int end = offset + len;
    for (int i = offset; i < end; i++) {
      writeByte(stream, b[i]);
    }
}
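  /** Writes i as a variable-length int (7 bits per byte, high bit set while more bytes follow) to the given stream. */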
final void writeVInt(int stream, int i) {
assert stream < streamCount;
while ((i & ~0x7F) != 0) {
writeByte(stream, (byte)((i & 0x7f) | 0x80));
i >>>= 7;
}
writeByte(stream, (byte) i);
}
final TermsHashPerField getNextPerField() {
return nextPerField;
}
final String getFieldName() {
return fieldName;
}
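  /**
   * {@link BytesStartArray} implementation that backs the hash with the parallel postings array's
   * textStarts and charges its memory to the given {@link Counter}.
   */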
private static final class PostingsBytesStartArray extends BytesStartArray {
private final TermsHashPerField perField;
private final Counter bytesUsed;
private PostingsBytesStartArray(
TermsHashPerField perField, Counter bytesUsed) {
this.perField = perField;
this.bytesUsed = bytesUsed;
}
@Override
public int[] init() {
if (perField.postingsArray == null) {
perField.postingsArray = perField.createPostingsArray(2);
perField.newPostingsArray();
bytesUsed.addAndGet(perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
}
return perField.postingsArray.textStarts;
}
@Override
public int[] grow() {
ParallelPostingsArray postingsArray = perField.postingsArray;
final int oldSize = perField.postingsArray.size;
postingsArray = perField.postingsArray = postingsArray.grow();
perField.newPostingsArray();
bytesUsed.addAndGet((postingsArray.bytesPerPosting() * (postingsArray.size - oldSize)));
return postingsArray.textStarts;
}
@Override
public int[] clear() {
if (perField.postingsArray != null) {
bytesUsed.addAndGet(-(perField.postingsArray.size * perField.postingsArray.bytesPerPosting()));
perField.postingsArray = null;
perField.newPostingsArray();
}
return null;
}
@Override
public Counter bytesUsed() {
return bytesUsed;
}
}
@Override
public final int compareTo(TermsHashPerField other) {
return fieldName.compareTo(other.fieldName);
}
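  /** Finish adding all instances of this field to the current document. */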
void finish() throws IOException {
if (nextPerField != null) {
nextPerField.finish();
}
}
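  /** Returns the number of unique terms buffered for this field. */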
final int getNumTerms() {
return bytesHash.size();
}
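  /**
   * Start adding a new field instance; first is true if this is the first time this field name
   * appears in the document.
   */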
boolean start(IndexableField field, boolean first) {
if (nextPerField != null) {
doNextCall = nextPerField.start(field, first);
}
return true;
}
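  /** Called when a term is seen for the first time. */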
abstract void newTerm(int termID, final int docID) throws IOException;
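  /** Called when a previously seen term is seen again. */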
abstract void addTerm(int termID, final int docID) throws IOException;
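  /** Called whenever the postings array is created, grown, or cleared so subclasses can refresh cached references. */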
abstract void newPostingsArray();
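  /** Creates a new postings array of the given size. */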
abstract ParallelPostingsArray createPostingsArray(int size);
}