diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index a8854aa6971..8c98c47ba06 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -31,7 +31,7 @@ env:
# Minimum required Java version for running Ozone is defined in pom.xml (javac.version).
TEST_JAVA_VERSION: 17 # JDK version used by CI build and tests; should match the JDK version in apache/ozone-runner image
MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
- HADOOP_IMAGE: ghcr.io/apache/hadoop
+ HADOOP_IMAGE: ghcr.io/chensammi/hadoop
OZONE_IMAGE: ghcr.io/apache/ozone
OZONE_RUNNER_IMAGE: ghcr.io/apache/ozone-runner
OZONE_WITH_COVERAGE: ${{ github.event_name == 'push' }}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
index 30c8c8e766f..10630ef1e72 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
@@ -23,9 +23,6 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.Config;
import org.apache.hadoop.hdds.conf.ConfigGroup;
import org.apache.hadoop.hdds.conf.ConfigType;
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
index 5fa6828484e..9399db2bec3 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
@@ -426,7 +426,7 @@ private void acquireClient() throws IOException {
// fall back to acquire GRPC client
if (xceiverClientFactory != null && xceiverClientGrpc == null) {
try {
- xceiverClientGrpc = xceiverClientFactory.acquireClientForReadData(pipeline, false);
+ xceiverClientGrpc = xceiverClientFactory.acquireClientForReadData(pipeline);
} catch (IOException ioe) {
LOG.warn("Failed to acquire client for pipeline {}, block {}", pipeline, blockID);
throw ioe;
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/DomainSocketFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/DomainSocketFactory.java
index ee2bd62afd2..0bc2806b3bb 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/DomainSocketFactory.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/DomainSocketFactory.java
@@ -26,7 +26,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Timer;
@@ -153,12 +152,6 @@ private DomainSocketFactory(ConfigurationSource conf) {
LOG.warn(FEATURE + " cannot be used because " + nativeLibraryLoadFailureReason);
pathInfo = PathInfo.DISABLED;
} else {
- File file = new File(domainSocketPath);
- if (file.exists()) {
- throw new IllegalArgumentException(FEATURE + " is enabled but "
- + OzoneClientConfig.OZONE_DOMAIN_SOCKET_PATH + " is an existing " +
- (file.isDirectory() ? "directory" : "file"));
- }
pathInfo = PathInfo.VALID;
isEnabled = true;
timer = new Timer(DomainSocketFactory.class.getSimpleName() + "-Timer");
@@ -274,9 +267,11 @@ public Timer getTimer() {
}
public static synchronized void close() {
- if (instance.getTimer() != null) {
- instance.getTimer().cancel();
+ if (instance != null) {
+ if (instance.getTimer() != null) {
+ instance.getTimer().cancel();
+ }
+ DomainSocketFactory.instance = null;
}
- DomainSocketFactory.instance = null;
}
}
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index fdeb5c1c043..a2f550a3158 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2003,7 +2003,16 @@
30s
OZONE, CLIENT, MANAGEMENT
- Timeout for ozone grpc client during read.
+ Timeout for ozone grpc and short-circuit client during read.
+
+
+
+
+ ozone.client.write.timeout
+ 30s
+ OZONE, CLIENT, MANAGEMENT
+
+ Timeout for ozone short-circuit client during write.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/Receiver.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/Receiver.java
index b0f01f0fb49..476fddbb768 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/Receiver.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/Receiver.java
@@ -296,7 +296,7 @@ private boolean isSupportedCmdType(ContainerProtos.Type type) {
return type == ContainerProtos.Type.GetBlock || type == ContainerProtos.Type.Echo;
}
- class TaskEntry {
+ static class TaskEntry {
private ContainerCommandRequestProto request;
private ContainerCommandResponseProto response;
private FileInputStream fis;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerDomainSocket.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerDomainSocket.java
index c3f0d540b24..715db66d0d8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerDomainSocket.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerDomainSocket.java
@@ -229,7 +229,7 @@ public void run() {
LOG.info("XceiverServerDomainSocket is closed", ace);
} catch (IOException ie) {
// usually when the xceiver count limit is hit.
- LOG.warn("Got an exception. Peer {}", peer.toString(), ie);
+ LOG.warn("Got an exception. Peer {}", peer, ie);
IOUtils.closeQuietly(peer);
} catch (OutOfMemoryError ie) {
IOUtils.closeQuietly(peer);
diff --git a/hadoop-ozone/dev-support/checks/_lib.sh b/hadoop-ozone/dev-support/checks/_lib.sh
index 48108f2e72b..03972825f48 100644
--- a/hadoop-ozone/dev-support/checks/_lib.sh
+++ b/hadoop-ozone/dev-support/checks/_lib.sh
@@ -161,10 +161,10 @@ download_hadoop_aws() {
if [[ ! -e "${dir}" ]] || [[ ! -d "${dir}"/src/test/resources ]]; then
mkdir -p "${dir}"
if [[ ! -f "${dir}.tar.gz" ]]; then
- local url="https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}-src.tar.gz"
+ local url="https://github.com/ChenSammi/hadoop/archive/refs/tags/release-${HADOOP_VERSION}-RC0.tar.gz"
echo "Downloading Hadoop from ${url}"
curl -LSs --fail -o "${dir}.tar.gz" "$url" || return 1
fi
- tar -x -z -C "${dir}" --strip-components=3 -f "${dir}.tar.gz" --wildcards 'hadoop-*-src/hadoop-tools/hadoop-aws' || return 1
+ tar -x -z -C "${dir}" --strip-components=3 -f "${dir}.tar.gz" --wildcards 'hadoop-*/hadoop-tools/hadoop-aws' || return 1
fi
}
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/.env b/hadoop-ozone/dist/src/main/compose/ozone-ha/.env
index 6507664fad7..2de359fc5db 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/.env
@@ -15,7 +15,6 @@
# limitations under the License.
HDDS_VERSION=${hdds.version}
-HADOOP_IMAGE=apache/hadoop
OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
OZONE_RUNNER_IMAGE=apache/ozone-runner
OZONE_OPTS=
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/.env b/hadoop-ozone/dist/src/main/compose/ozone/.env
index 6507664fad7..2de359fc5db 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozone/.env
@@ -15,7 +15,6 @@
# limitations under the License.
HDDS_VERSION=${hdds.version}
-HADOOP_IMAGE=apache/hadoop
OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
OZONE_RUNNER_IMAGE=apache/ozone-runner
OZONE_OPTS=
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env
index c3a2c5329aa..c524f432a89 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env
@@ -15,7 +15,7 @@
# limitations under the License.
HDDS_VERSION=${hdds.version}
-HADOOP_IMAGE=apache/hadoop
+HADOOP_IMAGE=${hadoop.image}
HADOOP_VERSION=${hadoop.version}
OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
OZONE_RUNNER_IMAGE=apache/ozone-runner
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env
index c3a2c5329aa..c524f432a89 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env
@@ -15,7 +15,7 @@
# limitations under the License.
HDDS_VERSION=${hdds.version}
-HADOOP_IMAGE=apache/hadoop
+HADOOP_IMAGE=${hadoop.image}
HADOOP_VERSION=${hadoop.version}
OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
OZONE_RUNNER_IMAGE=apache/ozone-runner
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure/.env
index c3a2c5329aa..c524f432a89 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/.env
@@ -15,7 +15,7 @@
# limitations under the License.
HDDS_VERSION=${hdds.version}
-HADOOP_IMAGE=apache/hadoop
+HADOOP_IMAGE=${hadoop.image}
HADOOP_VERSION=${hadoop.version}
OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
OZONE_RUNNER_IMAGE=apache/ozone-runner
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env
index 2625c4fbe90..cfe9468a5cd 100644
--- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env
@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-HADOOP_IMAGE=apache/hadoop
+HADOOP_IMAGE=${hadoop.image}
HADOOP_VERSION=${hadoop.version}
HDDS_VERSION=${hdds.version}
OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot
index 76e0cadf372..a9fb2d36b54 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot
@@ -42,7 +42,7 @@ Create bucket with invalid bucket name
${result} = Execute AWSS3APICli and checkrc create-bucket --bucket invalid_bucket_${randStr} 255
Should contain ${result} InvalidBucketName
-Create new bucket and check no group ACL
+Create new bucket and check default group ACL
${bucket} = Create bucket
${acl} = Execute ozone sh bucket getacl s3v/${bucket}
${group} = Get Regexp Matches ${acl} "GROUP"
@@ -50,5 +50,6 @@ Create new bucket and check no group ACL
${json} = Evaluate json.loads('''${acl}''') json
# make sure this check is for group acl
Should contain ${json}[1][type] GROUP
- Should contain ${json}[1][aclList] NONE
+ Should contain ${json}[1][aclList] READ
+ Should contain ${json}[1][aclList] LIST
END
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverServerDomainSocket.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverServerDomainSocket.java
index f9039d6a10c..ff927abd45f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverServerDomainSocket.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverServerDomainSocket.java
@@ -18,6 +18,7 @@
import com.google.common.collect.Maps;
import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -38,6 +39,7 @@
import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
import org.apache.hadoop.ozone.container.common.interfaces.Handler;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerDomainSocket;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
@@ -158,21 +160,22 @@ public void testIllegalDomainPathConfiguration() {
} finally {
factory.close();
}
+ }
- // an existing domain path, the existing regular file will be overwritten and turned into a socket file,
- // so configure an existing domain path is disallowed.
+ @Test
+ public void testExistingDomainPath() {
+ // an existing domain path, the existing file is overwritten and changed from a normal file to a socket file
conf.set(OzoneClientConfig.OZONE_DOMAIN_SOCKET_PATH, new File(dir, "ozone-socket").getAbsolutePath());
- File file = new File(dir, "ozone-socket");
+ DomainSocketFactory factory = DomainSocketFactory.getInstance(conf);
try {
+ File file = new File(dir, "ozone-socket");
assertTrue(file.createNewFile());
- DomainSocketFactory.getInstance(conf);
- fail("an existing domain path is not allowed.");
+ new XceiverServerDomainSocket(MockDatanodeDetails.randomDatanodeDetails(),
+ conf, null, readExecutors, metrics, factory);
} catch (Throwable e) {
- e.printStackTrace();
- assertTrue(e instanceof IllegalArgumentException);
- assertTrue(e.getMessage().contains("an existing file"));
+ fail("an existing domain path is supported but not recommended.");
} finally {
- file.delete();
+ factory.close();
}
}
@@ -347,12 +350,7 @@ public void testReadWrite(boolean deleteFileBeforeRead, boolean deleteFileDuring
assertEquals(1, containerMetrics.getContainerLocalOpsMetrics(ContainerProtos.Type.GetBlock));
} finally {
factory.close();
- if (sock != null) {
- try {
- sock.close();
- } catch (IOException e) {
- }
- }
+ IOUtils.closeQuietly(sock);
server.stop();
}
}
@@ -372,12 +370,7 @@ public void testServerNotListening() {
assertTrue(e.getMessage().contains("connect(2) error: No such file or directory"));
} finally {
factory.close();
- if (sock != null) {
- try {
- sock.close();
- } catch (IOException e) {
- }
- }
+ IOUtils.closeQuietly(sock);
}
}
@@ -392,29 +385,28 @@ public void testServerNotStart() {
XceiverServerDomainSocket server = new XceiverServerDomainSocket(MockDatanodeDetails.randomDatanodeDetails(),
conf, null, readExecutors, metrics, factory);
DomainSocket sock = null;
+ DataOutputStream outputStream = null;
+ DataInputStream inputStream = null;
try {
sock = factory.createSocket(readTimeout, writeTimeout, localhost);
assertTrue(sock.isOpen());
// send request
- final DataOutputStream outputStream = new DataOutputStream(sock.getOutputStream());
+ outputStream = new DataOutputStream(sock.getOutputStream());
outputStream.writeShort(OzoneClientConfig.DATA_TRANSFER_VERSION);
outputStream.writeShort(GetBlock.getNumber());
getBlockRequest().writeDelimitedTo(outputStream);
outputStream.flush();
- final DataInputStream inputStream = new DataInputStream(sock.getInputStream());
+ inputStream = new DataInputStream(sock.getInputStream());
inputStream.readShort();
} catch (IOException e) {
assertTrue(e instanceof SocketTimeoutException);
assertTrue(e.getMessage().contains("read(2) error: Resource temporarily unavailable"));
} finally {
factory.close();
- if (sock != null) {
- try {
- sock.close();
- } catch (IOException e) {
- }
- }
+ IOUtils.closeQuietly(outputStream);
+ IOUtils.closeQuietly(inputStream);
+ IOUtils.closeQuietly(sock);
server.stop();
}
}
@@ -445,12 +437,7 @@ public void testReadTimeout() throws InterruptedException {
assertTrue(e.getMessage().contains("write(2) error: Broken pipe"));
} finally {
factory.close();
- if (sock != null) {
- try {
- sock.close();
- } catch (IOException e) {
- }
- }
+ IOUtils.closeQuietly(sock);
server.stop();
}
}
@@ -458,6 +445,9 @@ public void testReadTimeout() throws InterruptedException {
@Test
public void testMaxXceiverCount() throws IOException, InterruptedException {
conf.set(OzoneClientConfig.OZONE_DOMAIN_SOCKET_PATH, new File(dir, "ozone-socket").getAbsolutePath());
+ DatanodeConfiguration datanodeConfiguration = conf.getObject(DatanodeConfiguration.class);
+ datanodeConfiguration.setNumReadThreadPerVolume(2);
+ conf.setFromObject(datanodeConfiguration);
DomainSocketFactory factory = DomainSocketFactory.getInstance(conf);
XceiverServerDomainSocket server = new XceiverServerDomainSocket(MockDatanodeDetails.randomDatanodeDetails(),
conf, null, readExecutors, metrics, factory);
@@ -466,20 +456,21 @@ public void testMaxXceiverCount() throws IOException, InterruptedException {
GenericTestUtils.LogCapturer.captureLogs(XceiverServerDomainSocket.LOG);
try {
server.start();
- // test max allowed xceiver count(default 10 * 5)
- int count = 51;
+ // test max allowed xceiver count(2 * 5)
+ int count = 11;
for (int i = 1; i <= count; i++) {
DomainSocket sock = factory.createSocket(readTimeout, writeTimeout, localhost);
list.add(sock);
}
- logCapturer.getOutput().contains("Xceiver count exceeds the limit" + (count - 1));
+ Thread.sleep(2000);
+ assertTrue(logCapturer.getOutput().contains("Xceiver count exceeds the limit " + (count - 1)));
DomainSocket lastSock = list.get(list.size() - 1);
// although remote peer is already closed due to limit exhausted, sock.isOpen() is still true.
// Only when client read/write socket stream, there will be exception or -1 returned.
assertTrue(lastSock.isOpen());
- // write to first 50 sockets should be OK
+ // write to first 10 sockets should be OK
for (int i = 0; i < count - 2; i++) {
DomainSocket sock = list.get(i);
assertTrue(sock.isOpen());
@@ -489,7 +480,6 @@ public void testMaxXceiverCount() throws IOException, InterruptedException {
assertFalse(sock.isOpen());
}
- Thread.sleep(5000);
// read a broken pipe will return -1
int data = lastSock.getInputStream().read();
assertEquals(-1, data);
@@ -522,24 +512,21 @@ public void testSendIrrelevantMessage() {
XceiverServerDomainSocket server = new XceiverServerDomainSocket(MockDatanodeDetails.randomDatanodeDetails(),
conf, null, readExecutors, metrics, factory);
DomainSocket sock = null;
+ DataOutputStream outputStream = null;
String data = "hello world";
try {
server.start();
sock = factory.createSocket(readTimeout, writeTimeout, localhost);
- final DataOutputStream outputStream = new DataOutputStream(sock.getOutputStream());
- outputStream.write(data.getBytes());
+ outputStream = new DataOutputStream(sock.getOutputStream());
+ outputStream.write(data.getBytes(StandardCharsets.UTF_8));
outputStream.flush();
sock.getInputStream().read();
} catch (IOException e) {
assertTrue(e instanceof EOFException);
} finally {
factory.close();
- if (sock != null) {
- try {
- sock.close();
- } catch (IOException e) {
- }
- }
+ IOUtils.closeQuietly(outputStream);
+ IOUtils.closeQuietly(sock);
server.stop();
}
}
@@ -573,12 +560,7 @@ public void testSendUnsupportedRequest() throws IOException {
assertTrue(responseProto.getResult() == ContainerProtos.Result.UNSUPPORTED_REQUEST);
} finally {
factory.close();
- if (sock != null) {
- try {
- sock.close();
- } catch (IOException e) {
- }
- }
+ IOUtils.closeQuietly(sock);
server.stop();
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestDomainSocketFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestDomainSocketFactory.java
index 677e2c8ca58..0dd0e2c7a8b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestDomainSocketFactory.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestDomainSocketFactory.java
@@ -26,6 +26,7 @@
import java.io.File;
import java.net.InetSocketAddress;
+
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -46,7 +47,7 @@ private DomainSocketFactory getDomainSocketFactory() {
clientConfig.setShortCircuit(true);
clientConfig.setShortCircuitReadDisableInterval(1);
conf.setFromObject(clientConfig);
- conf.set(OzoneClientConfig.OZONE_DOMAIN_SOCKET_PATH, new File(dir, "ozone-socket").getAbsolutePath());
+ conf.set(OzoneClientConfig.OZONE_DOMAIN_SOCKET_PATH, new File(dir, "ozone-socket").getAbsolutePath());
// create DomainSocketFactory
DomainSocketFactory domainSocketFactory = DomainSocketFactory.getInstance(conf);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
index b48a7067cad..75c49d0e9ef 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
@@ -219,8 +219,12 @@ private static XceiverClientManager mockDataNodeClientFactory()
.thenCallRealMethod();
when(manager.acquireClient(argThat(matchEmptyPipeline()),
anyBoolean())).thenCallRealMethod();
+ when(manager.acquireClient(argThat(matchEmptyPipeline()), anyBoolean(), anyBoolean()))
+ .thenCallRealMethod();
when(manager.acquireClientForReadData(argThat(matchEmptyPipeline())))
.thenCallRealMethod();
+ when(manager.acquireClientForReadData(argThat(matchEmptyPipeline()), anyBoolean()))
+ .thenCallRealMethod();
when(manager.acquireClient(argThat(matchPipeline(DN1))))
.thenReturn(mockDn1Protocol);
diff --git a/pom.xml b/pom.xml
index 16023cb28bd..b35c5b82392 100644
--- a/pom.xml
+++ b/pom.xml
@@ -67,7 +67,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
2023-01-01T00:00:00Z
2.10.2
- 3.4.1
+ 3.4.2-20241202-SNAPSHOT
+ ghcr.io/chensammi/hadoop
${ozone.version}