diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index 91f720a49eed9..1f98e55bcd9fa 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -263,8 +263,11 @@ private int doRun(String[] args)
       rpcServer.stopAndJoin();
 
       elector.quitElection(true);
-      healthMonitor.shutdown();
-      healthMonitor.join();
+
+      if (healthMonitor != null) {
+        healthMonitor.shutdown();
+        healthMonitor.join();
+      }
     }
     return 0;
   }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java
index 434183bbc25b9..3e6826afc8575 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java
@@ -54,7 +54,11 @@ public class BlockCompressorStream extends CompressorStream {
   public BlockCompressorStream(OutputStream out, Compressor compressor,
       int bufferSize, int compressionOverhead) {
     super(out, compressor, bufferSize);
-    MAX_INPUT_SIZE = bufferSize - compressionOverhead;
+    if (bufferSize - compressionOverhead >= 0) {
+      MAX_INPUT_SIZE = bufferSize - compressionOverhead;
+    } else {
+      throw new IllegalArgumentException("buffer size is less than compression overhead");
+    }
   }
 
   /**
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 15ad619ea8d0b..34d5d5371bcb0 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -37,10 +37,16 @@ See the [Commands Manual](./CommandsManual.html) for generic shell options.
 appendToFile
 ------------
 
-Usage: `hadoop fs -appendToFile <localsrc> ... <dst>`
+Usage: `hadoop fs -appendToFile [-n] <localsrc> ... <dst>`
 
 Append single src, or multiple srcs from local file system to the destination file system. Also reads input from stdin and appends to destination file system.
 
+Options:
+
+* The `-n` option appends to the file using the NEW_BLOCK create flag.
+
+Example:
+
 * `hadoop fs -appendToFile localfile /user/hadoop/hadoopfile`
 * `hadoop fs -appendToFile localfile1 localfile2 /user/hadoop/hadoopfile`
 * `hadoop fs -appendToFile localfile hdfs://nn.example.com/hadoop/hadoopfile`
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java
index 7fb2f9f5fe94a..85edcc6a0c5ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java
@@ -127,8 +127,13 @@ private SaslParticipant(SaslClient saslClient) {
   }
 
   byte[] createFirstMessage() throws SaslException {
-    return SaslMechanismFactory.isDefaultMechanism(MECHANISM_ARRAY[0]) ? EMPTY_BYTE_ARRAY
-        : evaluateChallengeOrResponse(EMPTY_BYTE_ARRAY);
+    if (saslClient != null) {
+      return saslClient.hasInitialResponse()
+          ? saslClient.evaluateChallenge(EMPTY_BYTE_ARRAY)
+          : EMPTY_BYTE_ARRAY;
+    }
+    throw new IllegalStateException(
+        "createFirstMessage must only be called for clients");
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 68ec0f6f21f5f..4b32a87626961 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -686,7 +686,8 @@ void resetData(Configuration conf) {
     this.belowAvgUtilized.clear();
     this.underUtilized.clear();
     this.policy.reset();
-    dispatcher.reset(conf);
+    this.dispatcher.reset(conf);
+    DefaultMetricsSystem.removeSourceName(metrics.getName());
   }
 
   NameNodeConnector getNnc() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md
index 21145e6908ea5..c3f81482946b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md
@@ -124,7 +124,7 @@ Assign all files to be owned by "rmarathe", write to gzip compressed text:
 hadoop org.apache.hadoop.hdfs.server.namenode.FileSystemImage \
   -Dhdfs.image.writer.ugi.single.user=rmarathe \
   -Ddfs.provided.aliasmap.text.codec=gzip \
-  -Ddfs.provided.aliasmap.text.write.dir=file:///tmp/
+  -Ddfs.provided.aliasmap.text.write.dir=file:///tmp/ \
   -b org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap \
   -u org.apache.hadoop.hdfs.server.namenode.SingleUGIResolver \
   -o file:///tmp/name \
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerLongRunningTasks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerLongRunningTasks.java
index adca5ff09331c..f73ed62ea74f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerLongRunningTasks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerLongRunningTasks.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -46,6 +47,8 @@
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Test;
@@ -764,6 +767,63 @@ public void testBalancerWithLimitOverUtilizedNum() throws Exception {
     }
   }
 
+  @Test(timeout = 60000)
+  public void testBalancerMetricsDuplicate() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    // Init the config (block size to 100)
+    initConf(conf);
+    final long totalCapacity = 1000L;
+    final int numOfOverUtilizedDn = 1;
+    final int numOfUnderUtilizedDn = 2;
+    final int totalNumOfDn = numOfOverUtilizedDn + numOfUnderUtilizedDn;
+    final long[] capacityArray = new long[totalNumOfDn];
+    Arrays.fill(capacityArray, totalCapacity);
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(totalNumOfDn)
+        .simulatedCapacities(capacityArray)
+        .build()) {
+      cluster.setDataNodesDead();
+      List<DataNode> dataNodes = cluster.getDataNodes();
+
+      DataNodeTestUtils.triggerHeartbeat(dataNodes.get(0));
+      DataNodeTestUtils.triggerBlockReport(dataNodes.get(0));
+      // Create nodes with: 100%
+      TestBalancer.createFile(cluster, new Path("test_big" + 0), 1000, (short) 1, 0);
+      cluster.setDataNodesDead();
+
+      // Two under-utilized datanodes in the cluster, so b.runOneIteration() executes at least twice
+      for (int i = 1; i <= numOfUnderUtilizedDn; i++) {
+        DataNodeTestUtils.triggerHeartbeat(dataNodes.get(i));
+        DataNodeTestUtils.triggerBlockReport(dataNodes.get(i));
+        // Create nodes with: 0%
+        TestBalancer.createFile(cluster, new Path("test_small" + i), 0, (short) 1, 0);
+        cluster.setDataNodesDead();
+      }
+
+      cluster.triggerDeletionReports();
+      cluster.triggerBlockReports();
+      cluster.triggerHeartbeats();
+
+      Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
+      Collection<String> nsIds = DFSUtilClient.getNameServiceIds(conf);
+      assertEquals(1, namenodes.size());
+
+      // Outside mini cluster mode, registering BalancerMetrics twice throws an error
+      DefaultMetricsSystem.setMiniClusterMode(false);
+      MetricsSystem instance = DefaultMetricsSystem.instance();
+      // Avoid interference from cluster metrics: remove the cluster's JvmMetrics source
+      instance.unregisterSource("JvmMetrics");
+
+      final BalancerParameters balancerParameters = Balancer.Cli.parse(new String[] {
+          "-policy", BalancingPolicy.Node.INSTANCE.getName(),
+          "-threshold", "10",
+      });
+      int r = Balancer.run(namenodes, nsIds, balancerParameters, conf);
+      assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
+      DefaultMetricsSystem.setMiniClusterMode(true);
+    }
+  }
+
   @Test(timeout = 100000)
   public void testMaxIterationTime() throws Exception {
     final Configuration conf = new HdfsConfiguration();