From f9e44c81ae637afe9bd4453e5043d3db2a61753b Mon Sep 17 00:00:00 2001
From: ekaterinadimitrova2
Date: Wed, 30 Oct 2024 13:15:14 -0400
Subject: [PATCH] CNDB-11434: Refactor tests to mitigate https://openjdk.org/jeps/416 (#1386)

* CNDB-11434: Refactor TrieMemtableIndexTest

* CNDB-11434: Refactor TinySegmentFlushingFailureTest

* CNDB-11434: Make all tests in TrieMemtableIndexTest run with both offheap and onheap to improve test coverage
---
 .../org/apache/cassandra/cql3/CQLTester.java  |   4 +
 .../apache/cassandra/index/sai/SAITester.java |  15 -
 ...mtableIndexAllocationsHeapBuffersTest.java |  38 ++
 .../sai/memory/TrieMemtableIndexTest.java     | 342 +----------------
 .../sai/memory/TrieMemtableIndexTestBase.java | 354 ++++++++++++++++++
 .../metrics/SegmentFlushingFailureTest.java   |   2 -
 .../TinySegmentFlushingFailureTest.java       |  50 ++-
 7 files changed, 441 insertions(+), 364 deletions(-)
 create mode 100644 test/unit/org/apache/cassandra/index/sai/memory/TrieMemtableIndexAllocationsHeapBuffersTest.java
 create mode 100644 test/unit/org/apache/cassandra/index/sai/memory/TrieMemtableIndexTestBase.java

diff --git a/test/unit/org/apache/cassandra/cql3/CQLTester.java b/test/unit/org/apache/cassandra/cql3/CQLTester.java
index 1c6b33bc11cb..07f383f5cf68 100644
--- a/test/unit/org/apache/cassandra/cql3/CQLTester.java
+++ b/test/unit/org/apache/cassandra/cql3/CQLTester.java
@@ -72,6 +72,7 @@
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.rules.TestWatcher;
 import org.junit.runner.Description;
@@ -2449,6 +2450,7 @@ protected static Gauge getPausedConnectionsGauge()
         return metrics.get(metricName);
     }
 
+    @Ignore // Check TinySegmentFlushingFailureTest for details on why this annotation is needed here even though this is not a test
     public static class Vector extends AbstractList
     {
         private final T[] values;
@@ -2656,6 +2658,7 @@ public String toString()
         }
     }
 
+    @Ignore // Check TinySegmentFlushingFailureTest for details on why this annotation is needed here even though this is not a test
     public static class Randomization
     {
         private long seed;
@@ -2752,6 +2755,7 @@ public void nextBytes(byte[] bytes)
         }
     }
 
+    @Ignore // Check TinySegmentFlushingFailureTest for details on why this annotation is needed here even though this is not a test
     public static class FailureWatcher extends TestWatcher
     {
         @Override
diff --git a/test/unit/org/apache/cassandra/index/sai/SAITester.java b/test/unit/org/apache/cassandra/index/sai/SAITester.java
index 26192dc01807..fa3bb11d0684 100644
--- a/test/unit/org/apache/cassandra/index/sai/SAITester.java
+++ b/test/unit/org/apache/cassandra/index/sai/SAITester.java
@@ -848,21 +848,6 @@ protected Set componentFiles(Collection indexFiles, IndexComponentTy
         return indexFiles.stream().filter(c -> c.name().endsWith(componentName)).collect(Collectors.toSet());
     }
 
-    protected static void setSegmentWriteBufferSpace(final int segmentSize) throws Exception
-    {
-        NamedMemoryLimiter limiter = (NamedMemoryLimiter) V1OnDiskFormat.class.getDeclaredField("SEGMENT_BUILD_MEMORY_LIMITER").get(null);
-        Field limitBytes = limiter.getClass().getDeclaredField("limitBytes");
-        limitBytes.setAccessible(true);
-        Field modifiersField = ReflectionUtils.getField(Field.class, "modifiers");
-        modifiersField.setAccessible(true);
-        modifiersField.setInt(limitBytes, limitBytes.getModifiers() & ~Modifier.FINAL);
-        limitBytes.set(limiter, segmentSize);
-        limitBytes = V1OnDiskFormat.class.getDeclaredField("SEGMENT_BUILD_MEMORY_LIMIT");
- 
limitBytes.setAccessible(true); - modifiersField.setInt(limitBytes, limitBytes.getModifiers() & ~Modifier.FINAL); - limitBytes.set(limiter, segmentSize); - } - /** * Run repeated verification task concurrently with target test */ diff --git a/test/unit/org/apache/cassandra/index/sai/memory/TrieMemtableIndexAllocationsHeapBuffersTest.java b/test/unit/org/apache/cassandra/index/sai/memory/TrieMemtableIndexAllocationsHeapBuffersTest.java new file mode 100644 index 000000000000..f6567475045d --- /dev/null +++ b/test/unit/org/apache/cassandra/index/sai/memory/TrieMemtableIndexAllocationsHeapBuffersTest.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.index.sai.memory; + +import org.junit.BeforeClass; + +import org.apache.cassandra.config.Config; +import org.apache.cassandra.db.memtable.TrieMemtable; +import org.apache.cassandra.io.compress.BufferType; + +import static org.junit.Assert.assertEquals; + +public class TrieMemtableIndexAllocationsHeapBuffersTest extends TrieMemtableIndexTestBase +{ + @BeforeClass + public static void setUpClass() + { + System.setProperty("cassandra.trie.memtable.shard.count", "8"); + setup(Config.MemtableAllocationType.heap_buffers); + assertEquals(TrieMemtable.BUFFER_TYPE, BufferType.ON_HEAP); + } +} diff --git a/test/unit/org/apache/cassandra/index/sai/memory/TrieMemtableIndexTest.java b/test/unit/org/apache/cassandra/index/sai/memory/TrieMemtableIndexTest.java index 3e1d3b585366..c047d9c0ca0e 100644 --- a/test/unit/org/apache/cassandra/index/sai/memory/TrieMemtableIndexTest.java +++ b/test/unit/org/apache/cassandra/index/sai/memory/TrieMemtableIndexTest.java @@ -18,355 +18,21 @@ package org.apache.cassandra.index.sai.memory; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; -import java.util.stream.Collectors; - -import org.junit.After; -import org.junit.Before; import org.junit.BeforeClass; -import org.junit.Test; -import org.apache.cassandra.cql3.Operator; -import org.apache.cassandra.db.Clustering; -import org.apache.cassandra.db.ColumnFamilyStore; -import org.apache.cassandra.db.DecoratedKey; -import org.apache.cassandra.db.PartitionPosition; -import org.apache.cassandra.db.marshal.Int32Type; -import org.apache.cassandra.db.memtable.AbstractAllocatorMemtable; +import org.apache.cassandra.config.Config; import org.apache.cassandra.db.memtable.TrieMemtable; -import org.apache.cassandra.dht.AbstractBounds; -import org.apache.cassandra.dht.BootStrapper; -import org.apache.cassandra.dht.Bounds; -import 
org.apache.cassandra.dht.ExcludingBounds; -import org.apache.cassandra.dht.IPartitioner; -import org.apache.cassandra.dht.IncludingExcludingBounds; -import org.apache.cassandra.dht.Range; -import org.apache.cassandra.index.sai.IndexContext; -import org.apache.cassandra.index.sai.QueryContext; -import org.apache.cassandra.index.sai.SAITester; -import org.apache.cassandra.index.sai.plan.Expression; -import org.apache.cassandra.index.sai.utils.PrimaryKey; -import org.apache.cassandra.index.sai.utils.RangeIterator; -import org.apache.cassandra.index.sai.utils.TypeUtil; -import org.apache.cassandra.inject.Injections; -import org.apache.cassandra.inject.InvokePointBuilder; import org.apache.cassandra.io.compress.BufferType; -import org.apache.cassandra.locator.TokenMetadata; -import org.apache.cassandra.schema.MockSchema; -import org.apache.cassandra.schema.TableMetadata; -import org.apache.cassandra.service.StorageService; -import org.apache.cassandra.utils.FBUtilities; -import org.apache.cassandra.utils.Pair; -import org.apache.cassandra.utils.ReflectionUtils; -import org.apache.cassandra.utils.bytecomparable.ByteComparable; -import org.apache.cassandra.utils.bytecomparable.ByteSource; -import org.apache.cassandra.utils.concurrent.OpOrder; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -public class TrieMemtableIndexTest extends SAITester +public class TrieMemtableIndexTest extends TrieMemtableIndexTestBase { - private static final Injections.Counter indexSearchCounter = Injections.newCounter("IndexSearchCounter") - .add(InvokePointBuilder.newInvokePoint() - .onClass(TrieMemoryIndex.class) - .onMethod("search")) - .build(); - - private ColumnFamilyStore cfs; - private IndexContext indexContext; - private TrieMemtableIndex memtableIndex; - private AbstractAllocatorMemtable memtable; - private IPartitioner partitioner; - private Map keyMap; - private Map rowMap; - @BeforeClass public static void setShardCount() { System.setProperty("cassandra.trie.memtable.shard.count", "8"); - } - - @Before - public void setup() throws Throwable - { - assertEquals(8, TrieMemtable.SHARD_COUNT); - - TokenMetadata metadata = StorageService.instance.getTokenMetadata(); - metadata.updateNormalTokens(BootStrapper.getRandomTokens(metadata, 10), FBUtilities.getBroadcastAddressAndPort()); - - TableMetadata tableMetadata = TableMetadata.builder("ks", "tb") - .addPartitionKeyColumn("pk", Int32Type.instance) - .addRegularColumn("val", Int32Type.instance) - .build(); - cfs = MockSchema.newCFS(tableMetadata); - partitioner = cfs.getPartitioner(); - memtable = (AbstractAllocatorMemtable) cfs.getCurrentMemtable(); - indexContext = SAITester.createIndexContext("index", Int32Type.instance, cfs); - indexSearchCounter.reset(); - keyMap = new TreeMap<>(); - rowMap = new HashMap<>(); - - Injections.inject(indexSearchCounter); - } - - @After - public void resetBufferType() throws Exception - { - setTrieMemtableBufferType(BufferType.OFF_HEAP); - } - - @Test - public void onHeapAllocation() throws Exception - { - setTrieMemtableBufferType(BufferType.ON_HEAP); - memtableIndex = new TrieMemtableIndex(indexContext, memtable); - assertEquals(TrieMemtable.SHARD_COUNT, memtableIndex.shardCount()); - - assertTrue(memtable.getAllocator().onHeap().owns() == 0); - assertTrue(memtable.getAllocator().offHeap().owns() == 0); - - for (int row = 0; row < 100; row++) - { - addRow(row, row); - } - - assertTrue(memtable.getAllocator().onHeap().owns() > 0); - 
assertTrue(memtable.getAllocator().offHeap().owns() == 0); - } - - @Test - public void offHeapAllocation() throws Exception - { - setTrieMemtableBufferType(BufferType.OFF_HEAP); - memtableIndex = new TrieMemtableIndex(indexContext, memtable); - assertEquals(TrieMemtable.SHARD_COUNT, memtableIndex.shardCount()); - - assertTrue(memtable.getAllocator().onHeap().owns() == 0); - assertTrue(memtable.getAllocator().offHeap().owns() == 0); - - for (int row = 0; row < 100; row++) - { - addRow(row, row); - } - - assertTrue(memtable.getAllocator().onHeap().owns() > 0); - assertTrue(memtable.getAllocator().offHeap().owns() > 0); - } - - @Test - public void randomQueryTest() throws Exception - { - memtableIndex = new TrieMemtableIndex(indexContext, memtable); - assertEquals(TrieMemtable.SHARD_COUNT, memtableIndex.shardCount()); - - for (int row = 0; row < getRandom().nextIntBetween(1000, 5000); row++) - { - int pk = getRandom().nextIntBetween(0, 10000); - while (rowMap.containsKey(pk)) - pk = getRandom().nextIntBetween(0, 10000); - int value = getRandom().nextIntBetween(0, 100); - rowMap.put(pk, value); - addRow(pk, value); - } - - List keys = new ArrayList<>(keyMap.keySet()); - - for (int executionCount = 0; executionCount < 1000; executionCount++) - { - Expression expression = generateRandomExpression(); - - AbstractBounds keyRange = generateRandomBounds(keys); - - Set expectedKeys = keyMap.keySet() - .stream() - .filter(keyRange::contains) - .map(keyMap::get) - .filter(pk -> expression.isSatisfiedBy(Int32Type.instance.decompose(rowMap.get(pk)))) - .collect(Collectors.toSet()); - - Set foundKeys = new HashSet<>(); - - try (RangeIterator iterator = memtableIndex.search(new QueryContext(), expression, keyRange, 0)) - { - while (iterator.hasNext()) - { - DecoratedKey k = iterator.next().partitionKey(); - int key = Int32Type.instance.compose(k.getKey()); - assertFalse(foundKeys.contains(key)); - foundKeys.add(key); - } - } - - assertEquals(expectedKeys, foundKeys); - } - } - - @Test - public void indexIteratorTest() - { - memtableIndex = new TrieMemtableIndex(indexContext, memtable); - - Map> terms = buildTermMap(); - - terms.entrySet() - .stream() - .forEach(entry -> entry.getValue() - .forEach(pk -> addRow(Int32Type.instance.compose(pk.getKey()), entry.getKey()))); - - for (int executionCount = 0; executionCount < 1000; executionCount++) - { - // These keys have midrange tokens that select 3 of the 8 range indexes - DecoratedKey temp1 = makeKey(cfs.metadata(), getRandom().nextIntBetween(0, 20000)); - DecoratedKey temp2 = makeKey(cfs.metadata(), getRandom().nextIntBetween(0, 20000)); - DecoratedKey minimum = temp1.compareTo(temp2) <= 0 ? temp1 : temp2; - DecoratedKey maximum = temp1.compareTo(temp2) <= 0 ? 
temp2 : temp1; - - Iterator>> iterator = memtableIndex.iterator(minimum, maximum); - - while (iterator.hasNext()) - { - Pair> termPair = iterator.next(); - int term = termFromComparable(termPair.left); - // The iterator will return keys outside the range of min/max so we need to filter here to - // get the correct keys - List expectedPks = terms.get(term) - .stream() - .filter(pk -> pk.compareTo(minimum) >= 0 && pk.compareTo(maximum) <= 0) - .sorted() - .collect(Collectors.toList()); - List termPks = new ArrayList<>(); - while (termPair.right.hasNext()) - { - DecoratedKey pk = termPair.right.next().partitionKey(); - if (pk.compareTo(minimum) >= 0 && pk.compareTo(maximum) <= 0) - termPks.add(pk); - } - assertEquals(expectedPks, termPks); - } - } - } - - private Expression generateRandomExpression() - { - Expression expression = new Expression(indexContext); - - int equality = getRandom().nextIntBetween(0, 100); - int lower = getRandom().nextIntBetween(0, 75); - int upper = getRandom().nextIntBetween(25, 100); - while (upper <= lower) - upper = getRandom().nextIntBetween(0, 100); - - if (getRandom().nextBoolean()) - expression.add(Operator.EQ, Int32Type.instance.decompose(equality)); - else - { - boolean useLower = getRandom().nextBoolean(); - boolean useUpper = getRandom().nextBoolean(); - if (!useLower && !useUpper) - useLower = useUpper = true; - if (useLower) - expression.add(getRandom().nextBoolean() ? Operator.GT : Operator.GTE, Int32Type.instance.decompose(lower)); - if (useUpper) - expression.add(getRandom().nextBoolean() ? Operator.LT : Operator.LTE, Int32Type.instance.decompose(upper)); - } - return expression; - } - - private AbstractBounds generateRandomBounds(List keys) - { - PartitionPosition leftBound = getRandom().nextBoolean() ? partitioner.getMinimumToken().minKeyBound() - : keys.get(getRandom().nextIntBetween(0, keys.size() - 1)).getToken().minKeyBound(); - - PartitionPosition rightBound = getRandom().nextBoolean() ? 
partitioner.getMinimumToken().minKeyBound() - : keys.get(getRandom().nextIntBetween(0, keys.size() - 1)).getToken().maxKeyBound(); - - AbstractBounds keyRange; - - if (leftBound.isMinimum() && rightBound.isMinimum()) - keyRange = new Range<>(leftBound, rightBound); - else - { - if (AbstractBounds.strictlyWrapsAround(leftBound, rightBound)) - { - PartitionPosition temp = leftBound; - leftBound = rightBound; - rightBound = temp; - } - if (getRandom().nextBoolean()) - keyRange = new Bounds<>(leftBound, rightBound); - else if (getRandom().nextBoolean()) - keyRange = new ExcludingBounds<>(leftBound, rightBound); - else - keyRange = new IncludingExcludingBounds<>(leftBound, rightBound); - } - return keyRange; - } - - private int termFromComparable(ByteComparable comparable) - { - ByteSource.Peekable peekable = ByteSource.peekable(comparable.asComparableBytes(TypeUtil.BYTE_COMPARABLE_VERSION)); - return Int32Type.instance.compose(Int32Type.instance.fromComparableBytes(peekable, TypeUtil.BYTE_COMPARABLE_VERSION)); - } - - private Map> buildTermMap() - { - Map> terms = new HashMap<>(); - - for (int count = 0; count < 10000; count++) - { - int term = getRandom().nextIntBetween(0, 100); - Set pks; - if (terms.containsKey(term)) - pks = terms.get(term); - else - { - pks = new HashSet<>(); - terms.put(term, pks); - } - DecoratedKey key = makeKey(cfs.metadata(), getRandom().nextIntBetween(0, 20000)); - while (pks.contains(key)) - key = makeKey(cfs.metadata(), getRandom().nextIntBetween(0, 20000)); - pks.add(key); - } - return terms; - } - - private void addRow(int pk, int value) - { - DecoratedKey key = makeKey(cfs.metadata(), pk); - memtableIndex.index(key, - Clustering.EMPTY, - Int32Type.instance.decompose(value), - cfs.getCurrentMemtable(), - new OpOrder().start()); - keyMap.put(key, pk); - } - - private DecoratedKey makeKey(TableMetadata table, Integer partitionKey) - { - ByteBuffer key = table.partitionKeyType.fromString(partitionKey.toString()); - return table.partitioner.decorateKey(key); - } - - private void setTrieMemtableBufferType(final BufferType newBufferType) throws Exception - { - Field bufferType = TrieMemtable.class.getDeclaredField("BUFFER_TYPE"); - bufferType.setAccessible(true); - Field modifiersField = ReflectionUtils.getField(Field.class, "modifiers"); - modifiersField.setAccessible(true); - modifiersField.setInt(bufferType, bufferType.getModifiers() & ~Modifier.FINAL); - bufferType.set(null, newBufferType); + setup(Config.MemtableAllocationType.offheap_buffers); + assertEquals(TrieMemtable.BUFFER_TYPE, BufferType.OFF_HEAP); } } diff --git a/test/unit/org/apache/cassandra/index/sai/memory/TrieMemtableIndexTestBase.java b/test/unit/org/apache/cassandra/index/sai/memory/TrieMemtableIndexTestBase.java new file mode 100644 index 000000000000..134ad1d1ca83 --- /dev/null +++ b/test/unit/org/apache/cassandra/index/sai/memory/TrieMemtableIndexTestBase.java @@ -0,0 +1,354 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.index.sai.memory; + +import java.lang.reflect.Field; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.stream.Collectors; + +import com.google.common.base.Throwables; +import org.junit.Before; +import org.junit.Test; + +import org.apache.cassandra.config.Config; +import org.apache.cassandra.config.DatabaseDescriptor; +import org.apache.cassandra.cql3.CQLTester; +import org.apache.cassandra.cql3.Operator; +import org.apache.cassandra.db.Clustering; +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.DecoratedKey; +import org.apache.cassandra.db.PartitionPosition; +import org.apache.cassandra.db.marshal.Int32Type; +import org.apache.cassandra.db.memtable.AbstractAllocatorMemtable; +import org.apache.cassandra.db.memtable.TrieMemtable; +import org.apache.cassandra.dht.AbstractBounds; +import org.apache.cassandra.dht.BootStrapper; +import org.apache.cassandra.dht.Bounds; +import org.apache.cassandra.dht.ExcludingBounds; +import org.apache.cassandra.dht.IPartitioner; +import org.apache.cassandra.dht.IncludingExcludingBounds; +import org.apache.cassandra.dht.Range; +import org.apache.cassandra.index.sai.IndexContext; +import org.apache.cassandra.index.sai.QueryContext; +import org.apache.cassandra.index.sai.SAITester; +import org.apache.cassandra.index.sai.plan.Expression; +import org.apache.cassandra.index.sai.utils.PrimaryKey; +import org.apache.cassandra.index.sai.utils.RangeIterator; +import org.apache.cassandra.index.sai.utils.TypeUtil; +import org.apache.cassandra.inject.Injections; +import org.apache.cassandra.inject.InvokePointBuilder; +import org.apache.cassandra.io.compress.BufferType; +import org.apache.cassandra.locator.TokenMetadata; +import org.apache.cassandra.schema.MockSchema; +import org.apache.cassandra.schema.TableMetadata; +import org.apache.cassandra.service.StorageService; +import org.apache.cassandra.utils.FBUtilities; +import org.apache.cassandra.utils.Pair; +import org.apache.cassandra.utils.bytecomparable.ByteComparable; +import org.apache.cassandra.utils.bytecomparable.ByteSource; +import org.apache.cassandra.utils.concurrent.OpOrder; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public abstract class TrieMemtableIndexTestBase extends SAITester +{ + static final Injections.Counter indexSearchCounter = Injections.newCounter("IndexSearchCounter") + .add(InvokePointBuilder.newInvokePoint() + .onClass(TrieMemoryIndex.class) + .onMethod("search")) + .build(); + + ColumnFamilyStore cfs; + IndexContext indexContext; + TrieMemtableIndex memtableIndex; + AbstractAllocatorMemtable memtable; + IPartitioner partitioner; + Map keyMap; + Map rowMap; + + public static void setup(Config.MemtableAllocationType allocationType) + { + try + { + Field confField = DatabaseDescriptor.class.getDeclaredField("conf"); + 
confField.setAccessible(true);
+            Config conf = (Config) confField.get(null);
+            conf.memtable_allocation_type = allocationType;
+            conf.memtable_cleanup_threshold = 0.8f; // give us more space to fit test data without flushing
+        }
+        catch (NoSuchFieldException | IllegalAccessException e)
+        {
+            throw Throwables.propagate(e);
+        }
+
+        CQLTester.setUpClass();
+        System.out.println("setUpClass done, allocation type " + allocationType);
+    }
+
+    @Before
+    public void setup() throws Throwable
+    {
+        assertEquals(8, TrieMemtable.SHARD_COUNT);
+
+        TokenMetadata metadata = StorageService.instance.getTokenMetadata();
+        metadata.updateNormalTokens(BootStrapper.getRandomTokens(metadata, 10), FBUtilities.getBroadcastAddressAndPort());
+
+        TableMetadata tableMetadata = TableMetadata.builder("ks", "tb")
+                                                   .addPartitionKeyColumn("pk", Int32Type.instance)
+                                                   .addRegularColumn("val", Int32Type.instance)
+                                                   .build();
+        cfs = MockSchema.newCFS(tableMetadata);
+        partitioner = cfs.getPartitioner();
+        memtable = (AbstractAllocatorMemtable) cfs.getCurrentMemtable();
+        indexContext = SAITester.createIndexContext("index", Int32Type.instance, cfs);
+        indexSearchCounter.reset();
+        keyMap = new TreeMap<>();
+        rowMap = new HashMap<>();
+
+        Injections.inject(indexSearchCounter);
+    }
+
+    @Test
+    public void allocation() throws Throwable
+    {
+        assertEquals(8, TrieMemtable.SHARD_COUNT);
+        memtableIndex = new TrieMemtableIndex(indexContext, memtable);
+        assertEquals(TrieMemtable.SHARD_COUNT, memtableIndex.shardCount());
+
+        assertEquals(0, memtable.getAllocator().onHeap().owns());
+        assertEquals(0, memtable.getAllocator().offHeap().owns());
+
+        for (int row = 0; row < 100; row++)
+        {
+            addRow(row, row);
+        }
+
+        assertTrue(memtable.getAllocator().onHeap().owns() > 0);
+
+        if (TrieMemtable.BUFFER_TYPE == BufferType.OFF_HEAP)
+            assertTrue(memtable.getAllocator().offHeap().owns() > 0);
+        else
+            assertEquals(0, memtable.getAllocator().offHeap().owns());
+    }
+
+    @Test
+    public void randomQueryTest() throws Exception
+    {
+        memtableIndex = new TrieMemtableIndex(indexContext, memtable);
+        assertEquals(TrieMemtable.SHARD_COUNT, memtableIndex.shardCount());
+
+        for (int row = 0; row < getRandom().nextIntBetween(1000, 5000); row++)
+        {
+            int pk = getRandom().nextIntBetween(0, 10000);
+            while (rowMap.containsKey(pk))
+                pk = getRandom().nextIntBetween(0, 10000);
+            int value = getRandom().nextIntBetween(0, 100);
+            rowMap.put(pk, value);
+            addRow(pk, value);
+        }
+
+        List keys = new ArrayList<>(keyMap.keySet());
+
+        for (int executionCount = 0; executionCount < 1000; executionCount++)
+        {
+            Expression expression = generateRandomExpression();
+
+            AbstractBounds keyRange = generateRandomBounds(keys);
+
+            Set expectedKeys = keyMap.keySet()
+                                     .stream()
+                                     .filter(keyRange::contains)
+                                     .map(keyMap::get)
+                                     .filter(pk -> expression.isSatisfiedBy(Int32Type.instance.decompose(rowMap.get(pk))))
+                                     .collect(Collectors.toSet());
+
+            Set foundKeys = new HashSet<>();
+
+            try (RangeIterator iterator = memtableIndex.search(new QueryContext(), expression, keyRange, 0))
+            {
+                while (iterator.hasNext())
+                {
+                    DecoratedKey k = iterator.next().partitionKey();
+                    int key = Int32Type.instance.compose(k.getKey());
+                    assertFalse(foundKeys.contains(key));
+                    foundKeys.add(key);
+                }
+            }
+
+            assertEquals(expectedKeys, foundKeys);
+        }
+    }
+
+    @Test
+    public void indexIteratorTest()
+    {
+        memtableIndex = new TrieMemtableIndex(indexContext, memtable);
+
+        Map> terms = buildTermMap();
+
+        terms.entrySet()
+             .stream()
+             .forEach(entry -> entry.getValue()
+                                    .forEach(pk -> 
addRow(Int32Type.instance.compose(pk.getKey()), entry.getKey()))); + + for (int executionCount = 0; executionCount < 1000; executionCount++) + { + // These keys have midrange tokens that select 3 of the 8 range indexes + DecoratedKey temp1 = makeKey(cfs.metadata(), getRandom().nextIntBetween(0, 20000)); + DecoratedKey temp2 = makeKey(cfs.metadata(), getRandom().nextIntBetween(0, 20000)); + DecoratedKey minimum = temp1.compareTo(temp2) <= 0 ? temp1 : temp2; + DecoratedKey maximum = temp1.compareTo(temp2) <= 0 ? temp2 : temp1; + + Iterator>> iterator = memtableIndex.iterator(minimum, maximum); + + while (iterator.hasNext()) + { + Pair> termPair = iterator.next(); + int term = termFromComparable(termPair.left); + // The iterator will return keys outside the range of min/max, so we need to filter here to + // get the correct keys + List expectedPks = terms.get(term) + .stream() + .filter(pk -> pk.compareTo(minimum) >= 0 && pk.compareTo(maximum) <= 0) + .sorted() + .collect(Collectors.toList()); + List termPks = new ArrayList<>(); + while (termPair.right.hasNext()) + { + DecoratedKey pk = termPair.right.next().partitionKey(); + if (pk.compareTo(minimum) >= 0 && pk.compareTo(maximum) <= 0) + termPks.add(pk); + } + assertEquals(expectedPks, termPks); + } + } + } + + private Expression generateRandomExpression() + { + Expression expression = new Expression(indexContext); + + int equality = getRandom().nextIntBetween(0, 100); + int lower = getRandom().nextIntBetween(0, 75); + int upper = getRandom().nextIntBetween(25, 100); + while (upper <= lower) + upper = getRandom().nextIntBetween(0, 100); + + if (getRandom().nextBoolean()) + expression.add(Operator.EQ, Int32Type.instance.decompose(equality)); + else + { + boolean useLower = getRandom().nextBoolean(); + boolean useUpper = getRandom().nextBoolean(); + if (!useLower && !useUpper) + useLower = useUpper = true; + if (useLower) + expression.add(getRandom().nextBoolean() ? Operator.GT : Operator.GTE, Int32Type.instance.decompose(lower)); + if (useUpper) + expression.add(getRandom().nextBoolean() ? Operator.LT : Operator.LTE, Int32Type.instance.decompose(upper)); + } + return expression; + } + + private AbstractBounds generateRandomBounds(List keys) + { + PartitionPosition leftBound = getRandom().nextBoolean() ? partitioner.getMinimumToken().minKeyBound() + : keys.get(getRandom().nextIntBetween(0, keys.size() - 1)).getToken().minKeyBound(); + + PartitionPosition rightBound = getRandom().nextBoolean() ? 
partitioner.getMinimumToken().minKeyBound() + : keys.get(getRandom().nextIntBetween(0, keys.size() - 1)).getToken().maxKeyBound(); + + AbstractBounds keyRange; + + if (leftBound.isMinimum() && rightBound.isMinimum()) + keyRange = new Range<>(leftBound, rightBound); + else + { + if (AbstractBounds.strictlyWrapsAround(leftBound, rightBound)) + { + PartitionPosition temp = leftBound; + leftBound = rightBound; + rightBound = temp; + } + if (getRandom().nextBoolean()) + keyRange = new Bounds<>(leftBound, rightBound); + else if (getRandom().nextBoolean()) + keyRange = new ExcludingBounds<>(leftBound, rightBound); + else + keyRange = new IncludingExcludingBounds<>(leftBound, rightBound); + } + return keyRange; + } + + private int termFromComparable(ByteComparable comparable) + { + ByteSource.Peekable peekable = ByteSource.peekable(comparable.asComparableBytes(TypeUtil.BYTE_COMPARABLE_VERSION)); + return Int32Type.instance.compose(Int32Type.instance.fromComparableBytes(peekable, TypeUtil.BYTE_COMPARABLE_VERSION)); + } + + private Map> buildTermMap() + { + Map> terms = new HashMap<>(); + + for (int count = 0; count < 10000; count++) + { + int term = getRandom().nextIntBetween(0, 100); + Set pks; + if (terms.containsKey(term)) + pks = terms.get(term); + else + { + pks = new HashSet<>(); + terms.put(term, pks); + } + DecoratedKey key = makeKey(cfs.metadata(), getRandom().nextIntBetween(0, 20000)); + while (pks.contains(key)) + key = makeKey(cfs.metadata(), getRandom().nextIntBetween(0, 20000)); + pks.add(key); + } + return terms; + } + + void addRow(int pk, int value) + { + DecoratedKey key = makeKey(cfs.metadata(), pk); + memtableIndex.index(key, + Clustering.EMPTY, + Int32Type.instance.decompose(value), + cfs.getCurrentMemtable(), + new OpOrder().start()); + keyMap.put(key, pk); + } + + DecoratedKey makeKey(TableMetadata table, Integer partitionKey) + { + ByteBuffer key = table.partitionKeyType.fromString(partitionKey.toString()); + return table.partitioner.decorateKey(key); + } +} diff --git a/test/unit/org/apache/cassandra/index/sai/metrics/SegmentFlushingFailureTest.java b/test/unit/org/apache/cassandra/index/sai/metrics/SegmentFlushingFailureTest.java index 167605592f92..1e337547ac1b 100644 --- a/test/unit/org/apache/cassandra/index/sai/metrics/SegmentFlushingFailureTest.java +++ b/test/unit/org/apache/cassandra/index/sai/metrics/SegmentFlushingFailureTest.java @@ -28,7 +28,6 @@ import org.junit.Test; import com.datastax.driver.core.ResultSet; -import com.datastax.driver.core.exceptions.ReadFailureException; import org.apache.cassandra.config.StorageAttachedIndexOptions; import org.apache.cassandra.index.sai.SAITester; import org.apache.cassandra.index.sai.disk.format.Version; @@ -42,7 +41,6 @@ import static org.apache.cassandra.inject.Injections.newCounter; import static org.apache.cassandra.inject.InvokePointBuilder.newInvokePoint; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; diff --git a/test/unit/org/apache/cassandra/index/sai/metrics/TinySegmentFlushingFailureTest.java b/test/unit/org/apache/cassandra/index/sai/metrics/TinySegmentFlushingFailureTest.java index c1263a6c63e3..12256981ea17 100644 --- a/test/unit/org/apache/cassandra/index/sai/metrics/TinySegmentFlushingFailureTest.java +++ b/test/unit/org/apache/cassandra/index/sai/metrics/TinySegmentFlushingFailureTest.java @@ -17,22 +17,54 @@ */ package 
org.apache.cassandra.index.sai.metrics;
 
-import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.experimental.runners.Enclosed;
+import org.junit.runner.RunWith;
+import java.lang.reflect.Field;
 
+import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.cql3.CQLTester;
+
 
-public class TinySegmentFlushingFailureTest extends SegmentFlushingFailureTest
+@RunWith(Enclosed.class)
+public class TinySegmentFlushingFailureTest extends CQLTester
 {
-    @Before
-    public void setSegmentBufferSpace() throws Throwable
+    /**
+     * Set the necessary configuration before any tests run.
+     */
+    @BeforeClass
+    public static void setUpClass()
     {
-        setSegmentWriteBufferSpace(0);
+        try
+        {
+            Field confField = DatabaseDescriptor.class.getDeclaredField("conf");
+            confField.setAccessible(true);
+            Config conf = (Config) confField.get(null);
+            conf.sai_options.segment_write_buffer_space_mb = 0;
+
+            System.out.println("Configuration set: segment_write_buffer_space_mb = " + 0);
+        }
+        catch (NoSuchFieldException | IllegalAccessException e)
+        {
+            throw new RuntimeException("Failed to set configuration segment_write_buffer_space_mb = 0", e);
+        }
+
+        CQLTester.setUpClass();
     }
 
-    @Override
-    protected long expectedBytesLimit()
+    /**
+     * These tests will run only after the outer class has completed its setup. Otherwise, SAITester assigns a default
+     * value to segment_write_buffer_space_mb, and we cannot override it without reflection or using Unsafe.
+     */
+    public static class TinySegmentFlushingFailureInnerClassTest extends SegmentFlushingFailureTest
     {
-        return 0;
+
+        @Override
+        protected long expectedBytesLimit()
+        {
+            return 0;
+        }
     }
-}
+}
\ No newline at end of file
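
The recurring pattern in this patch is to mutate the already-loaded Config through DatabaseDescriptor's private "conf" field before CQLTester.setUpClass() initialises the node, instead of clearing FINAL on static fields via Field.modifiers, which the commit subject notes no longer works reliably on JDKs that ship JEP 416. A reusable variant of that pattern could look like the sketch below. This is illustrative only: the class name ConfigOverridingTestHelper and the Consumer<Config> hook are hypothetical and not part of this patch, and, like the patch's own code, it assumes the Config object has already been loaded when the test class initialises.

// Hypothetical helper factoring out the override-Config-before-setUpClass pattern used by
// TrieMemtableIndexTestBase.setup(...) and TinySegmentFlushingFailureTest.setUpClass() above.
package org.apache.cassandra.index.sai; // placement is illustrative only

import java.lang.reflect.Field;
import java.util.function.Consumer;

import org.apache.cassandra.config.Config;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.cql3.CQLTester;

public final class ConfigOverridingTestHelper
{
    private ConfigOverridingTestHelper()
    {
    }

    /**
     * Applies the given override to the loaded Config, then runs CQLTester.setUpClass().
     * Intended to be called from a @BeforeClass method, before any daemon initialisation.
     */
    public static void setUpClassWithConfig(Consumer<Config> override)
    {
        try
        {
            // Same reflective access as the patch: the "conf" field itself is not final,
            // so no Field.modifiers manipulation is needed.
            Field confField = DatabaseDescriptor.class.getDeclaredField("conf");
            confField.setAccessible(true);
            override.accept((Config) confField.get(null));
        }
        catch (NoSuchFieldException | IllegalAccessException e)
        {
            throw new RuntimeException("Failed to override Config before class initialisation", e);
        }

        CQLTester.setUpClass();
    }
}

A test variant could then call, for example, ConfigOverridingTestHelper.setUpClassWithConfig(conf -> conf.memtable_allocation_type = Config.MemtableAllocationType.heap_buffers) from its own @BeforeClass method, mirroring what TrieMemtableIndexAllocationsHeapBuffersTest does in this patch.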