
Commit

Merge branch 'Hadoop-19357_configs' of github.com:manika137/hadoop into Hadoop-19357_configs
manika137 committed Dec 11, 2024
2 parents 7b290a4 + d9d2b83 commit 11161bd
Showing 7 changed files with 87 additions and 8 deletions.
@@ -263,8 +263,11 @@ private int doRun(String[] args)
rpcServer.stopAndJoin();

elector.quitElection(true);
healthMonitor.shutdown();
healthMonitor.join();

if (healthMonitor != null) {
healthMonitor.shutdown();
healthMonitor.join();
}
}
return 0;
}
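The hunk above guards the monitor teardown so that a failover controller which aborts before its health monitor is ever created does not trip over a null reference during shutdown. A minimal sketch of the pattern, with hypothetical names rather than the actual Hadoop classes:

```java
// Minimal sketch of the guarded teardown shown above; HealthMonitor here is an
// illustrative stand-in, not the Hadoop class.
class FailoverTeardown {
  interface HealthMonitor {
    void shutdown();
    void join() throws InterruptedException;
  }

  private HealthMonitor healthMonitor; // may still be null if startup failed early

  void stop() throws InterruptedException {
    if (healthMonitor != null) {       // the new guard: skip when never initialized
      healthMonitor.shutdown();
      healthMonitor.join();
    }
  }
}
```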
@@ -54,7 +54,11 @@ public class BlockCompressorStream extends CompressorStream {
public BlockCompressorStream(OutputStream out, Compressor compressor,
int bufferSize, int compressionOverhead) {
super(out, compressor, bufferSize);
MAX_INPUT_SIZE = bufferSize - compressionOverhead;
if (bufferSize - compressionOverhead >= 0) {
MAX_INPUT_SIZE = bufferSize - compressionOverhead;
} else {
throw new IllegalArgumentException("buffer size is less than compression overhead");
}
}
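With the new check, an impossible configuration fails fast at construction time instead of surfacing later as a negative MAX_INPUT_SIZE. A hedged example of what a caller would now see; the buffer values and the choice of compressor are illustrative only:

```java
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.io.compress.BlockCompressorStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.snappy.SnappyCompressor;

public class UndersizedBufferExample {
  public static void main(String[] args) {
    // Assumption: any block Compressor works for this demonstration.
    Compressor compressor = new SnappyCompressor();
    int bufferSize = 64;            // deliberately smaller than the claimed overhead
    int compressionOverhead = 128;
    try {
      new BlockCompressorStream(new ByteArrayOutputStream(), compressor,
          bufferSize, compressionOverhead);
    } catch (IllegalArgumentException expected) {
      // "buffer size is less than compression overhead"
      System.err.println("rejected: " + expected.getMessage());
    }
  }
}
```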

/**
@@ -37,10 +37,16 @@ See the [Commands Manual](./CommandsManual.html) for generic shell options.
appendToFile
------------

Usage: `hadoop fs -appendToFile <localsrc> ... <dst> `
Usage: `hadoop fs -appendToFile [-n] <localsrc> ... <dst>`

Append a single src, or multiple srcs, from the local file system to the destination file system. Also reads input from stdin and appends it to the destination file system.

Options

* The `-n` option uses the NEW_BLOCK create flag while appending the file.

Example:

* `hadoop fs -appendToFile localfile /user/hadoop/hadoopfile`
* `hadoop fs -appendToFile localfile1 localfile2 /user/hadoop/hadoopfile`
* `hadoop fs -appendToFile localfile hdfs://nn.example.com/hadoop/hadoopfile`
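For completeness, a hedged sketch of the programmatic counterpart of `-appendToFile -n`. It assumes the `DistributedFileSystem#append` overload that accepts `CreateFlag`s; the host name, path, and buffer size are illustrative:

```java
import java.nio.charset.StandardCharsets;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class AppendNewBlockExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path dst = new Path("hdfs://nn.example.com/hadoop/hadoopfile");
    try (DistributedFileSystem dfs =
             (DistributedFileSystem) dst.getFileSystem(conf);
         // APPEND plus NEW_BLOCK: appended data starts in a fresh block
         FSDataOutputStream out = dfs.append(
             dst, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null)) {
      out.write("appended in a new block".getBytes(StandardCharsets.UTF_8));
    }
  }
}
```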
@@ -127,8 +127,13 @@ private SaslParticipant(SaslClient saslClient) {
}

byte[] createFirstMessage() throws SaslException {
return SaslMechanismFactory.isDefaultMechanism(MECHANISM_ARRAY[0]) ? EMPTY_BYTE_ARRAY
: evaluateChallengeOrResponse(EMPTY_BYTE_ARRAY);
if (saslClient != null) {
return saslClient.hasInitialResponse()
? saslClient.evaluateChallenge(EMPTY_BYTE_ARRAY)
: EMPTY_BYTE_ARRAY;
}
throw new IllegalStateException(
"createFirstMessage must only be called for clients");
}
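The rewritten method falls back to the standard JDK SASL contract: a client supplies an initial response only when the mechanism defines one. A minimal, self-contained sketch of that contract, separate from Hadoop's SaslParticipant:

```java
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;

class FirstMessage {
  private static final byte[] EMPTY = new byte[0];

  static byte[] firstMessage(SaslClient client) throws SaslException {
    if (client == null) {
      throw new IllegalStateException("createFirstMessage must only be called for clients");
    }
    return client.hasInitialResponse()
        ? client.evaluateChallenge(EMPTY)   // e.g. PLAIN sends its response up front
        : EMPTY;                            // e.g. DIGEST-MD5 waits for the server challenge
  }
}
```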

/**
@@ -686,7 +686,8 @@ void resetData(Configuration conf) {
this.belowAvgUtilized.clear();
this.underUtilized.clear();
this.policy.reset();
dispatcher.reset(conf);
this.dispatcher.reset(conf);
DefaultMetricsSystem.removeSourceName(metrics.getName());
}
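The added `removeSourceName` call matters because `DefaultMetricsSystem` tracks registered source names, so re-registering the balancer's metrics under the same name on a later iteration would otherwise be rejected as a duplicate outside mini-cluster mode (the new test below exercises exactly that). A hedged, self-contained sketch of the register/remove cycle with a made-up source name:

```java
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.Interns;

public class MetricsSourceRecycleExample {
  // A trivial source; "DemoSource" is a hypothetical name, not a Hadoop one.
  static class DemoSource implements MetricsSource {
    @Override
    public void getMetrics(MetricsCollector collector, boolean all) {
      collector.addRecord("Demo").addGauge(Interns.info("demoValue", "a demo gauge"), 1);
    }
  }

  public static void main(String[] args) {
    MetricsSystem ms = DefaultMetricsSystem.initialize("DemoApp");
    ms.register("DemoSource", "first registration", new DemoSource());
    ms.unregisterSource("DemoSource");
    // Without this, registering "DemoSource" again would clash with the remembered name.
    DefaultMetricsSystem.removeSourceName("DemoSource");
    ms.register("DemoSource", "second registration", new DemoSource());
    DefaultMetricsSystem.shutdown();
  }
}
```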

NameNodeConnector getNnc() {
@@ -124,7 +124,7 @@ Assign all files to be owned by "rmarathe", write to gzip compressed text:
hadoop org.apache.hadoop.hdfs.server.namenode.FileSystemImage \
-Dhdfs.image.writer.ugi.single.user=rmarathe \
-Ddfs.provided.aliasmap.text.codec=gzip \
-Ddfs.provided.aliasmap.text.write.dir=file:///tmp/
-Ddfs.provided.aliasmap.text.write.dir=file:///tmp/ \
-b org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap \
-u org.apache.hadoop.hdfs.server.namenode.SingleUGIResolver \
-o file:///tmp/name \
@@ -27,6 +27,7 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -46,6 +47,8 @@
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Test;
@@ -764,6 +767,63 @@ public void testBalancerWithLimitOverUtilizedNum() throws Exception {
}
}

@Test(timeout = 60000)
public void testBalancerMetricsDuplicate() throws Exception {
final Configuration conf = new HdfsConfiguration();
// Init the config (block size to 100)
initConf(conf);
final long totalCapacity = 1000L;
final int numOfOverUtilizedDn = 1;
final int numOfUnderUtilizedDn = 2;
final int totalNumOfDn = numOfOverUtilizedDn + numOfUnderUtilizedDn;
final long[] capacityArray = new long[totalNumOfDn];
Arrays.fill(capacityArray, totalCapacity);
try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(totalNumOfDn)
.simulatedCapacities(capacityArray)
.build()) {
cluster.setDataNodesDead();
List<DataNode> dataNodes = cluster.getDataNodes();

DataNodeTestUtils.triggerHeartbeat(dataNodes.get(0));
DataNodeTestUtils.triggerBlockReport(dataNodes.get(0));
// Create nodes with: 100%
TestBalancer.createFile(cluster, new Path("test_big" + 0), 1000, (short) 1, 0);
cluster.setDataNodesDead();

// Two UnderUtilized in the cluster, execute at least twice: b.runOneIteration()
for (int i = 1; i <= numOfUnderUtilizedDn; i++) {
DataNodeTestUtils.triggerHeartbeat(dataNodes.get(i));
DataNodeTestUtils.triggerBlockReport(dataNodes.get(i));
// Create nodes with: 0%
TestBalancer.createFile(cluster, new Path("test_small" + i), 0, (short) 1, 0);
cluster.setDataNodesDead();
}

cluster.triggerDeletionReports();
cluster.triggerBlockReports();
cluster.triggerHeartbeats();

Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
Collection<String> nsIds = DFSUtilClient.getNameServiceIds(conf);
assertEquals(1, namenodes.size());

// Throw an error when we double-initialize BalancerMetrics
DefaultMetricsSystem.setMiniClusterMode(false);
MetricsSystem instance = DefaultMetricsSystem.instance();
// Avoid the impact of cluster metric, remove cluster JvmMetrics
instance.unregisterSource("JvmMetrics");

final BalancerParameters balancerParameters = Balancer.Cli.parse(new String[] {
"-policy", BalancingPolicy.Node.INSTANCE.getName(),
"-threshold", "10",
});
int r = Balancer.run(namenodes, nsIds, balancerParameters, conf);
assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
DefaultMetricsSystem.setMiniClusterMode(true);
}
}

@Test(timeout = 100000)
public void testMaxIterationTime() throws Exception {
final Configuration conf = new HdfsConfiguration();
