Skip to content

Commit

Permalink
Remove the deleted_blocks column family for container schema versions two and three
Browse files Browse the repository at this point in the history
  • Loading branch information
ChenSammi committed Feb 20, 2024
1 parent c8e6cab commit decb696
Show file tree
Hide file tree
Showing 7 changed files with 9 additions and 54 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@
import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
import org.apache.hadoop.hdds.utils.db.DBDefinition;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;

import java.io.File;

Expand Down Expand Up @@ -70,7 +69,4 @@ public ConfigurationSource getConfig() {

public abstract DBColumnFamilyDefinition<String, Long>
getMetadataColumnFamily();

public abstract DBColumnFamilyDefinition<String, ChunkInfoList>
getDeletedBlocksColumnFamily();
}
Original file line number Diff line number Diff line change
Expand Up @@ -60,8 +60,6 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {

private Table<String, BlockData> blockDataTableWithIterator;

private Table<String, ChunkInfoList> deletedBlocksTable;

static final Logger LOG =
LoggerFactory.getLogger(AbstractDatanodeStore.class);
private volatile DBStore store;
Expand Down Expand Up @@ -154,10 +152,6 @@ public void start(ConfigurationSource config)

blockDataTable = new DatanodeTable<>(blockDataTableWithIterator);
checkTableStatus(blockDataTable, blockDataTable.getName());

deletedBlocksTable = new DatanodeTable<>(
dbDef.getDeletedBlocksColumnFamily().getTable(this.store));
checkTableStatus(deletedBlocksTable, deletedBlocksTable.getName());
}
}

Expand Down Expand Up @@ -191,7 +185,7 @@ public Table<String, BlockData> getBlockDataTable() {

@Override
public Table<String, ChunkInfoList> getDeletedBlocksTable() {
return deletedBlocksTable;
throw new UnsupportedOperationException("DeletedBlocksTable is only supported in Container Schema One");
}

@Override
Expand Down Expand Up @@ -250,7 +244,7 @@ protected Table<String, BlockData> getBlockDataTableWithIterator() {
return this.blockDataTableWithIterator;
}

private static void checkTableStatus(Table<?, ?> table, String name)
protected static void checkTableStatus(Table<?, ?> table, String name)
throws IOException {
String logMessage = "Unable to get a reference to %s table. Cannot " +
"continue.";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,6 @@ public DBColumnFamilyDefinition<String, Long> getMetadataColumnFamily() {
return METADATA;
}

@Override
public DBColumnFamilyDefinition<String, ChunkInfoList>
getDeletedBlocksColumnFamily() {
return DELETED_BLOCKS;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,6 @@
import org.apache.hadoop.hdds.utils.db.Proto2Codec;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile;

Expand Down Expand Up @@ -74,15 +73,6 @@ public class DatanodeSchemaThreeDBDefinition
Long.class,
LongCodec.get());

public static final DBColumnFamilyDefinition<String, ChunkInfoList>
DELETED_BLOCKS =
new DBColumnFamilyDefinition<>(
"deleted_blocks",
String.class,
FixedLengthStringCodec.get(),
ChunkInfoList.class,
ChunkInfoList.getCodec());

public static final DBColumnFamilyDefinition<String, DeletedBlocksTransaction>
DELETE_TRANSACTION =
new DBColumnFamilyDefinition<>(
Expand All @@ -98,7 +88,6 @@ public class DatanodeSchemaThreeDBDefinition
COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap(
BLOCK_DATA,
METADATA,
DELETED_BLOCKS,
DELETE_TRANSACTION);

public DatanodeSchemaThreeDBDefinition(String dbPath,
Expand All @@ -120,7 +109,6 @@ public DatanodeSchemaThreeDBDefinition(String dbPath,

BLOCK_DATA.setCfOptions(cfOptions);
METADATA.setCfOptions(cfOptions);
DELETED_BLOCKS.setCfOptions(cfOptions);
DELETE_TRANSACTION.setCfOptions(cfOptions);
}

Expand All @@ -140,12 +128,6 @@ public DBColumnFamilyDefinition<String, Long> getMetadataColumnFamily() {
return METADATA;
}

@Override
public DBColumnFamilyDefinition<String, ChunkInfoList>
getDeletedBlocksColumnFamily() {
return DELETED_BLOCKS;
}

public DBColumnFamilyDefinition<String, DeletedBlocksTransaction>
getDeleteTransactionsColumnFamily() {
return DELETE_TRANSACTION;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@
import org.apache.hadoop.hdds.utils.db.Proto2Codec;
import org.apache.hadoop.hdds.utils.db.StringCodec;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;

Expand Down Expand Up @@ -58,15 +57,6 @@ public class DatanodeSchemaTwoDBDefinition
Long.class,
LongCodec.get());

public static final DBColumnFamilyDefinition<String, ChunkInfoList>
DELETED_BLOCKS =
new DBColumnFamilyDefinition<>(
"deleted_blocks",
String.class,
StringCodec.get(),
ChunkInfoList.class,
ChunkInfoList.getCodec());

public static final DBColumnFamilyDefinition<Long, DeletedBlocksTransaction>
DELETE_TRANSACTION =
new DBColumnFamilyDefinition<>(
Expand All @@ -85,7 +75,6 @@ public DatanodeSchemaTwoDBDefinition(String dbPath,
COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap(
BLOCK_DATA,
METADATA,
DELETED_BLOCKS,
DELETE_TRANSACTION);

@Override
Expand All @@ -104,12 +93,6 @@ public DBColumnFamilyDefinition<String, Long> getMetadataColumnFamily() {
return METADATA;
}

@Override
public DBColumnFamilyDefinition<String, ChunkInfoList>
getDeletedBlocksColumnFamily() {
return DELETED_BLOCKS;
}

public DBColumnFamilyDefinition<Long, DeletedBlocksTransaction>
getDeleteTransactionsColumnFamily() {
return DELETE_TRANSACTION;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,9 @@
* places all data in the default column family.
*/
public class DatanodeStoreSchemaOneImpl extends AbstractDatanodeStore {

private Table<String, ChunkInfoList> deletedBlocksTable;

/**
* Constructs the metadata store and starts the DB Services.
*
Expand All @@ -38,12 +41,15 @@ public DatanodeStoreSchemaOneImpl(ConfigurationSource config, String dbPath,
boolean openReadOnly) throws IOException {
super(config, new DatanodeSchemaOneDBDefinition(dbPath, config),
openReadOnly);
deletedBlocksTable = new DatanodeTable<>(
((DatanodeSchemaOneDBDefinition) getDbDef()).getDeletedBlocksColumnFamily().getTable(getStore()));
checkTableStatus(deletedBlocksTable, deletedBlocksTable.getName());
}

@Override
public Table<String, ChunkInfoList> getDeletedBlocksTable() {
// Return a wrapper around the deleted blocks table to handle prefixes
// when all data is stored in a single table.
return new SchemaOneDeletedBlocksTable(super.getDeletedBlocksTable());
return new SchemaOneDeletedBlocksTable(deletedBlocksTable);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,6 @@ public void removeKVContainerData(long containerID) throws IOException {
try (BatchOperation batch = getBatchHandler().initBatchOperation()) {
getMetadataTable().deleteBatchWithPrefix(batch, prefix);
getBlockDataTable().deleteBatchWithPrefix(batch, prefix);
getDeletedBlocksTable().deleteBatchWithPrefix(batch, prefix);
getDeleteTransactionTable().deleteBatchWithPrefix(batch, prefix);
getBatchHandler().commitBatchOperation(batch);
}
Expand All @@ -112,8 +111,6 @@ public void dumpKVContainerData(long containerID, File dumpDir)
getTableDumpFile(getMetadataTable(), dumpDir), prefix);
getBlockDataTable().dumpToFileWithPrefix(
getTableDumpFile(getBlockDataTable(), dumpDir), prefix);
getDeletedBlocksTable().dumpToFileWithPrefix(
getTableDumpFile(getDeletedBlocksTable(), dumpDir), prefix);
getDeleteTransactionTable().dumpToFileWithPrefix(
getTableDumpFile(getDeleteTransactionTable(), dumpDir),
prefix);
Expand All @@ -125,8 +122,6 @@ public void loadKVContainerData(File dumpDir)
getTableDumpFile(getMetadataTable(), dumpDir));
getBlockDataTable().loadFromFile(
getTableDumpFile(getBlockDataTable(), dumpDir));
getDeletedBlocksTable().loadFromFile(
getTableDumpFile(getDeletedBlocksTable(), dumpDir));
getDeleteTransactionTable().loadFromFile(
getTableDumpFile(getDeleteTransactionTable(), dumpDir));
}
Expand Down

0 comments on commit decb696

Please sign in to comment.