
Commit 27ce8a3

fix(hadoop): Upgrade nimbus-jose-jwt in Hadoop to fix CVE-2025-53864 (#1245)
* fix(hadoop): Upgrade nimbus-jose-jwt in Hadoop 3.4.1 to fix CVE-2025-53864
* add patch to upcoming Hadoop version as well
* add upstream reference to commit message
1 parent c68dfe4 commit 27ce8a3

10 files changed: 876 additions & 2 deletions

hadoop/hadoop/stackable/patches/3.4.1/0011-HADOOP-18583.-Fix-loading-of-OpenSSL-3.x-symbols-525.patch

Lines changed: 2 additions & 2 deletions
@@ -1,7 +1,7 @@
-From cd1c23ea5bddd2796caf2590fef467e488c3bcbf Mon Sep 17 00:00:00 2001
+From 932464d9fbf23f9042fee2f8b4be6029174d2ca4 Mon Sep 17 00:00:00 2001
 From: Sebastian Klemke <[email protected]>
 Date: Thu, 7 Nov 2024 19:14:13 +0100
-Subject: HADOOP-18583. Fix loading of OpenSSL 3.x symbols (#5256) (#7149)
+Subject: HADOOP-18583. Fix loading of OpenSSL 3.x symbols (#5256) (#7149)
 
 Contributed by Sebastian Klemke
 ---
Lines changed: 37 additions & 0 deletions
@@ -0,0 +1,37 @@
From 672f429d62c9ef7c5fdd00cf856a698f438c7cec Mon Sep 17 00:00:00 2001
From: xeniape <[email protected]>
Date: Thu, 11 Sep 2025 12:14:05 +0200
Subject: Upgrade-nimbus-jose-jwt-to-9.37.4-to-fix-CVE-2025-53864, Upstream
 reference: https://github.com/apache/hadoop/pull/7870

---
 LICENSE-binary         | 2 +-
 hadoop-project/pom.xml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/LICENSE-binary b/LICENSE-binary
index 90da3d032b..fdcb5c0a1f 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -240,7 +240,7 @@ com.google.guava:guava:20.0
 com.google.guava:guava:32.0.1-jre
 com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
 com.microsoft.azure:azure-storage:7.0.0
-com.nimbusds:nimbus-jose-jwt:9.37.2
+com.nimbusds:nimbus-jose-jwt:9.37.4
 com.zaxxer:HikariCP:4.0.3
 commons-beanutils:commons-beanutils:1.9.4
 commons-cli:commons-cli:1.5.0
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 155cdf9841..e23f524224 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -216,7 +216,7 @@
     <openssl-wildfly.version>1.1.3.Final</openssl-wildfly.version>
     <jsonschema2pojo.version>1.0.2</jsonschema2pojo.version>
     <woodstox.version>5.4.0</woodstox.version>
-    <nimbus-jose-jwt.version>9.37.2</nimbus-jose-jwt.version>
+    <nimbus-jose-jwt.version>9.37.4</nimbus-jose-jwt.version>
     <nodejs.version>v14.17.0</nodejs.version>
     <yarnpkg.version>v1.22.5</yarnpkg.version>
     <apache-ant.version>1.10.13</apache-ant.version>
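
Note: a quick way to confirm which nimbus-jose-jwt actually ends up on the classpath after rebuilding is to read the jar's manifest metadata. A minimal, hypothetical standalone check (com.nimbusds.jose.JWSObject is just one well-known class from the library; the lookup may yield null if the jar ships without Implementation-Version metadata):

public class NimbusVersionCheck {
  public static void main(String[] args) {
    // Resolve the package of a class that lives in nimbus-jose-jwt.
    Package pkg = com.nimbusds.jose.JWSObject.class.getPackage();
    // Expected to print 9.37.4 once this patch is applied; prints null
    // if the jar's manifest carries no version metadata.
    System.out.println(pkg == null ? null : pkg.getImplementationVersion());
  }
}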
Lines changed: 22 additions & 0 deletions
@@ -0,0 +1,22 @@
From c4dbb05b4f92f93c7e8f11d6a622b73f40f4664c Mon Sep 17 00:00:00 2001
From: xeniape <[email protected]>
Date: Wed, 10 Sep 2025 14:18:38 +0200
Subject: YARN-11527-Update-node.js

---
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index b9eacd5ba3..70f64bf55c 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -234,7 +234,7 @@
     <jsonschema2pojo.version>1.0.2</jsonschema2pojo.version>
     <woodstox.version>5.4.0</woodstox.version>
     <nimbus-jose-jwt.version>9.37.2</nimbus-jose-jwt.version>
-    <nodejs.version>v12.22.1</nodejs.version>
+    <nodejs.version>v14.17.0</nodejs.version>
     <yarnpkg.version>v1.22.5</yarnpkg.version>
     <apache-ant.version>1.10.13</apache-ant.version>
     <jmh.version>1.20</jmh.version>
Lines changed: 259 additions & 0 deletions
@@ -0,0 +1,259 @@
From adc337817824ba29e7eb669c13730acdbb0b9630 Mon Sep 17 00:00:00 2001
From: xeniape <[email protected]>
Date: Wed, 10 Sep 2025 14:36:20 +0200
Subject: Allow-overriding-datanode-registration-addresses

---
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java |  9 +++
 .../blockmanagement/DatanodeManager.java      | 43 +++++++-----
 .../hadoop/hdfs/server/datanode/DNConf.java   | 70 +++++++++++++++++++
 .../hadoop/hdfs/server/datanode/DataNode.java | 35 ++++++++--
 4 files changed, 135 insertions(+), 22 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f92a2ad565..25bcd438c7 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -152,6 +152,13 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_DATANODE_DROP_CACHE_BEHIND_READS_DEFAULT = false;
   public static final String DFS_DATANODE_USE_DN_HOSTNAME = "dfs.datanode.use.datanode.hostname";
   public static final boolean DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT = false;
+
+  public static final String DFS_DATANODE_REGISTERED_HOSTNAME = "dfs.datanode.registered.hostname";
+  public static final String DFS_DATANODE_REGISTERED_DATA_PORT = "dfs.datanode.registered.port";
+  public static final String DFS_DATANODE_REGISTERED_HTTP_PORT = "dfs.datanode.registered.http.port";
+  public static final String DFS_DATANODE_REGISTERED_HTTPS_PORT = "dfs.datanode.registered.https.port";
+  public static final String DFS_DATANODE_REGISTERED_IPC_PORT = "dfs.datanode.registered.ipc.port";
+
   public static final String DFS_DATANODE_MAX_LOCKED_MEMORY_KEY = "dfs.datanode.max.locked.memory";
   public static final long DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT = 0;
   public static final String DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY = "dfs.datanode.fsdatasetcache.max.threads.per.volume";
@@ -491,6 +498,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long DFS_DATANODE_PROCESS_COMMANDS_THRESHOLD_DEFAULT =
       TimeUnit.SECONDS.toMillis(2);
 
+  public static final String DFS_NAMENODE_DATANODE_REGISTRATION_UNSAFE_ALLOW_ADDRESS_OVERRIDE_KEY = "dfs.namenode.datanode.registration.unsafe.allow-address-override";
+  public static final boolean DFS_NAMENODE_DATANODE_REGISTRATION_UNSAFE_ALLOW_ADDRESS_OVERRIDE_DEFAULT = false;
   public static final String DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY = "dfs.namenode.datanode.registration.ip-hostname-check";
   public static final boolean DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT = true;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index ebd2fa992e..c56f254478 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -181,6 +181,8 @@ public class DatanodeManager {
   private boolean hasClusterEverBeenMultiRack = false;
 
   private final boolean checkIpHostnameInRegistration;
+  private final boolean allowRegistrationAddressOverride;
+
   /**
    * Whether we should tell datanodes what to cache in replies to
    * heartbeat messages.
@@ -314,6 +316,11 @@ public class DatanodeManager {
     // Block invalidate limit also has some dependency on heartbeat interval.
     // Check setBlockInvalidateLimit().
     setBlockInvalidateLimit(configuredBlockInvalidateLimit);
+    this.allowRegistrationAddressOverride = conf.getBoolean(
+        DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_UNSAFE_ALLOW_ADDRESS_OVERRIDE_KEY,
+        DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_UNSAFE_ALLOW_ADDRESS_OVERRIDE_DEFAULT);
+    LOG.info(DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_UNSAFE_ALLOW_ADDRESS_OVERRIDE_KEY
+        + "=" + allowRegistrationAddressOverride);
     this.checkIpHostnameInRegistration = conf.getBoolean(
         DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY,
         DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT);
@@ -1158,27 +1165,29 @@ public class DatanodeManager {
    */
   public void registerDatanode(DatanodeRegistration nodeReg)
       throws DisallowedDatanodeException, UnresolvedTopologyException {
-    InetAddress dnAddress = Server.getRemoteIp();
-    if (dnAddress != null) {
-      // Mostly called inside an RPC, update ip and peer hostname
-      String hostname = dnAddress.getHostName();
-      String ip = dnAddress.getHostAddress();
-      if (checkIpHostnameInRegistration && !isNameResolved(dnAddress)) {
-        // Reject registration of unresolved datanode to prevent performance
-        // impact of repetitive DNS lookups later.
-        final String message = "hostname cannot be resolved (ip="
-            + ip + ", hostname=" + hostname + ")";
-        LOG.warn("Unresolved datanode registration: " + message);
-        throw new DisallowedDatanodeException(nodeReg, message);
+    if (!allowRegistrationAddressOverride) {
+      InetAddress dnAddress = Server.getRemoteIp();
+      if (dnAddress != null) {
+        // Mostly called inside an RPC, update ip and peer hostname
+        String hostname = dnAddress.getHostName();
+        String ip = dnAddress.getHostAddress();
+        if (checkIpHostnameInRegistration && !isNameResolved(dnAddress)) {
+          // Reject registration of unresolved datanode to prevent performance
+          // impact of repetitive DNS lookups later.
+          final String message = "hostname cannot be resolved (ip="
+              + ip + ", hostname=" + hostname + ")";
+          LOG.warn("Unresolved datanode registration: " + message);
+          throw new DisallowedDatanodeException(nodeReg, message);
+        }
+        // update node registration with the ip and hostname from rpc request
+        nodeReg.setIpAddr(ip);
+        nodeReg.setPeerHostName(hostname);
       }
-      // update node registration with the ip and hostname from rpc request
-      nodeReg.setIpAddr(ip);
-      nodeReg.setPeerHostName(hostname);
     }
-
+
     try {
       nodeReg.setExportedKeys(blockManager.getBlockKeys());
-
+
       // Checks if the node is not on the hosts list. If it is not, then
       // it will be disallowed from registering.
       if (!hostConfigManager.isIncluded(nodeReg)) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
index 21b92db307..5d3437239c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
@@ -101,6 +101,11 @@ public class DNConf {
   final boolean syncOnClose;
   final boolean encryptDataTransfer;
   final boolean connectToDnViaHostname;
+  private final String registeredHostname;
+  private final int registeredDataPort;
+  private final int registeredHttpPort;
+  private final int registeredHttpsPort;
+  private final int registeredIpcPort;
   final boolean overwriteDownstreamDerivedQOP;
   private final boolean pmemCacheRecoveryEnabled;
 
@@ -189,6 +194,11 @@ public class DNConf {
     connectToDnViaHostname = getConf().getBoolean(
         DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
         DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
+    registeredHostname = getConf().get(DFSConfigKeys.DFS_DATANODE_REGISTERED_HOSTNAME);
+    registeredDataPort = getConf().getInt(DFSConfigKeys.DFS_DATANODE_REGISTERED_DATA_PORT, -1);
+    registeredHttpPort = getConf().getInt(DFSConfigKeys.DFS_DATANODE_REGISTERED_HTTP_PORT, -1);
+    registeredHttpsPort = getConf().getInt(DFSConfigKeys.DFS_DATANODE_REGISTERED_HTTPS_PORT, -1);
+    registeredIpcPort = getConf().getInt(DFSConfigKeys.DFS_DATANODE_REGISTERED_IPC_PORT, -1);
     this.blockReportInterval = getConf().getLong(
         DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
         DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
@@ -363,6 +373,66 @@ public class DNConf {
     return connectToDnViaHostname;
   }
 
+  /**
+   * Returns a hostname to register with the cluster instead of the system
+   * hostname.
+   * This is an expert setting and can be used in multihoming scenarios to
+   * override the detected hostname.
+   *
+   * @return null if the system hostname should be used, otherwise a hostname
+   */
+  public String getRegisteredHostname() {
+    return registeredHostname;
+  }
+
+  /**
+   * Returns a port number to register with the cluster instead of the
+   * data port that the node is listening on.
+   * This is an expert setting and can be used in multihoming scenarios to
+   * override the detected port.
+   *
+   * @return -1 if the actual port should be used, otherwise a port number
+   */
+  public int getRegisteredDataPort() {
+    return registeredDataPort;
+  }
+
+  /**
+   * Returns a port number to register with the cluster instead of the
+   * HTTP port that the node is listening on.
+   * This is an expert setting and can be used in multihoming scenarios to
+   * override the detected port.
+   *
+   * @return -1 if the actual port should be used, otherwise a port number
+   */
+  public int getRegisteredHttpPort() {
+    return registeredHttpPort;
+  }
+
+  /**
+   * Returns a port number to register with the cluster instead of the
+   * HTTPS port that the node is listening on.
+   * This is an expert setting and can be used in multihoming scenarios to
+   * override the detected port.
+   *
+   * @return -1 if the actual port should be used, otherwise a port number
+   */
+  public int getRegisteredHttpsPort() {
+    return registeredHttpsPort;
+  }
+
+  /**
+   * Returns a port number to register with the cluster instead of the
+   * IPC port that the node is listening on.
+   * This is an expert setting and can be used in multihoming scenarios to
+   * override the detected port.
+   *
+   * @return -1 if the actual port should be used, otherwise a port number
+   */
+  public int getRegisteredIpcPort() {
+    return registeredIpcPort;
+  }
+
   /**
    * Returns socket timeout
    *
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 956f5bbe51..22ae127d98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -135,6 +135,7 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.UUID;
@@ -2076,11 +2077,35 @@ public class DataNode extends ReconfigurableBase
           NodeType.DATA_NODE);
     }
 
-    DatanodeID dnId = new DatanodeID(
-        streamingAddr.getAddress().getHostAddress(), hostName,
-        storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
-        infoSecurePort, getIpcPort());
-    return new DatanodeRegistration(dnId, storageInfo,
+    String registeredHostname = Optional
+        .ofNullable(dnConf.getRegisteredHostname())
+        .orElseGet(() -> streamingAddr.getAddress().getHostAddress());
+    int registeredDataPort = dnConf.getRegisteredDataPort();
+    if (registeredDataPort == -1) {
+      registeredDataPort = getXferPort();
+    }
+    int registeredHttpPort = dnConf.getRegisteredHttpPort();
+    if (registeredHttpPort == -1) {
+      registeredHttpPort = getInfoPort();
+    }
+    int registeredHttpsPort = dnConf.getRegisteredHttpsPort();
+    if (registeredHttpsPort == -1) {
+      registeredHttpsPort = getInfoSecurePort();
+    }
+    int registeredIpcPort = dnConf.getRegisteredIpcPort();
+    if (registeredIpcPort == -1) {
+      registeredIpcPort = getIpcPort();
+    }
+
+    DatanodeID dnId = new DatanodeID(registeredHostname,
+        registeredHostname,
+        storage.getDatanodeUuid(),
+        registeredDataPort,
+        registeredHttpPort,
+        registeredHttpsPort,
+        registeredIpcPort);
+
+    return new DatanodeRegistration(dnId, storageInfo,
         new ExportedBlockKeys(), VersionInfo.getVersion());
   }
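
Note: the new keys are read through the standard Hadoop Configuration mechanism, so they can be set programmatically as well as in hdfs-site.xml. A minimal sketch of wiring them up (the hostname and port values are hypothetical; the constants are exactly the ones added in this patch, with null/-1 meaning no override):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class RegistrationOverrideExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Datanode side: report an externally reachable address instead of the
    // locally detected one.
    conf.set(DFSConfigKeys.DFS_DATANODE_REGISTERED_HOSTNAME,
        "dn-0.external.example.com");
    conf.setInt(DFSConfigKeys.DFS_DATANODE_REGISTERED_DATA_PORT, 31000);
    conf.setInt(DFSConfigKeys.DFS_DATANODE_REGISTERED_IPC_PORT, 31001);
    // Namenode side: without this unsafe switch, the namenode overwrites the
    // registered address with the RPC source address, as it did before the patch.
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_UNSAFE_ALLOW_ADDRESS_OVERRIDE_KEY,
        true);
  }
}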
Lines changed: 29 additions & 0 deletions
@@ -0,0 +1,29 @@
From ab9550bd7b71c16c381a105a22732f6e71f2dba6 Mon Sep 17 00:00:00 2001
From: xeniape <[email protected]>
Date: Wed, 10 Sep 2025 14:39:20 +0200
Subject: Async-profiler-also-grab-itimer-events

---
 .../src/main/java/org/apache/hadoop/http/ProfileServlet.java | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/ProfileServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/ProfileServlet.java
index ce53274151..909892ff90 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/ProfileServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/ProfileServlet.java
@@ -76,6 +76,7 @@ import org.apache.hadoop.util.ProcessUtils;
  * Following event types are supported (default is 'cpu') (NOTE: not all OS'es support all events)
  * // Perf events:
  * //    cpu
+ * //    itimer
  * //    page-faults
  * //    context-switches
  * //    cycles
@@ -118,6 +119,7 @@ public class ProfileServlet extends HttpServlet {
   private enum Event {
 
     CPU("cpu"),
+    ITIMER("itimer"),
     ALLOC("alloc"),
     LOCK("lock"),
     PAGE_FAULTS("page-faults"),
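
Note: with ITIMER in the enum, itimer sampling can be requested from the profiling endpoint like any other event type. A hedged sketch (the host and port are placeholders; the /prof mount point and the duration parameter are assumed from the stock HttpServer2/ProfileServlet setup, and async-profiler must be installed on the target host):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ItimerProfileRequest {
  public static void main(String[] args) throws Exception {
    // Ask the daemon's ProfileServlet for a 30-second itimer profile.
    HttpClient client = HttpClient.newHttpClient();
    HttpRequest request = HttpRequest.newBuilder(
            URI.create("http://namenode.example.com:9870/prof?event=itimer&duration=30"))
        .GET()
        .build();
    HttpResponse<String> response =
        client.send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.statusCode());
    System.out.println(response.body());
  }
}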
