HDFS-9427. HDFS should not default to ephemeral ports. Contributed by Xiaobing Zhou.
Jing9 committed Apr 22, 2016
1 parent c610031 commit 63e5412
Showing 40 changed files with 319 additions and 317 deletions.
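
Note (a reader's aid, not part of the commit): the hunks below move the affected defaults out of the ephemeral port range. A minimal Java sketch collecting the remappings visible in this diff; the class and constant names are illustrative:

```java
/** Illustrative summary of the port moves in HDFS-9427; not a class in this commit. */
final class NewHdfsDefaultPorts {
  static final int NAMENODE_RPC       = 9820; // was 8020
  static final int NAMENODE_HTTP      = 9870; // was 50070
  static final int NAMENODE_HTTPS     = 9871; // was 50470
  static final int SECONDARY_NN_HTTP  = 9868; // was 50090
  static final int SECONDARY_NN_HTTPS = 9869; // was 50091
  static final int DATANODE_HTTPS     = 9865; // was 50475
  static final int DATANODE_DATA_XFER = 9866; // was 50010
  static final int DATANODE_IPC       = 9867; // was 50020 (as in the `hadoop trace` example below)
  private NewHdfsDefaultPorts() {}
}
```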
@@ -93,7 +93,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   /**
    * CallQueue related settings. These are not used directly, but rather
    * combined with a namespace and port. For instance:
-   * IPC_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY
+   * IPC_NAMESPACE + ".9820." + IPC_CALLQUEUE_IMPL_KEY
    */
   public static final String IPC_NAMESPACE = "ipc";
   public static final String IPC_CALLQUEUE_IMPL_KEY = "callqueue.impl";
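
Note (not part of the commit): a minimal sketch of how the namespaced key described in that Javadoc is assembled; the literal port is illustrative and matches the new NameNode RPC default.

```java
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class CallQueueKeyDemo {
  public static void main(String[] args) {
    // Yields "ipc.9820.callqueue.impl" with the new default RPC port.
    String key = CommonConfigurationKeys.IPC_NAMESPACE
        + ".9820."
        + CommonConfigurationKeys.IPC_CALLQUEUE_IMPL_KEY;
    System.out.println(key);
  }
}
```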
@@ -685,8 +685,8 @@ public BlockLocation[] getFileBlockLocations(FileStatus file,
       return new BlockLocation[0];

     }
-    String[] name = { "localhost:50010" };
-    String[] host = { "localhost" };
+    String[] name = {"localhost:9866"};
+    String[] host = {"localhost"};
     return new BlockLocation[] {
         new BlockLocation(name, host, 0, file.getLen()) };
   }
@@ -82,11 +82,11 @@
  *
  * <p>Examples:</p>
  * <p><blockquote><pre>
- * $ bin/hadoop dfs -fs darwin:8020 -ls /data
- *     list /data directory in dfs with namenode darwin:8020
+ * $ bin/hadoop dfs -fs darwin:9820 -ls /data
+ *     list /data directory in dfs with namenode darwin:9820
  *
- * $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data
- *     list /data directory in dfs with namenode darwin:8020
+ * $ bin/hadoop dfs -D fs.default.name=darwin:9820 -ls /data
+ *     list /data directory in dfs with namenode darwin:9820
  *
  * $ bin/hadoop dfs -conf core-site.xml -conf hdfs-site.xml -ls /data
  *     list /data directory in dfs with multiple conf files specified.
@@ -318,7 +318,7 @@ Once the Hadoop cluster is up and running check the web-ui of the components as

 | Daemon | Web Interface | Notes |
 |:---- |:---- |:---- |
-| NameNode | http://nn_host:port/ | Default HTTP port is 50070. |
+| NameNode | http://nn_host:port/ | Default HTTP port is 9870. |
 | ResourceManager | http://rm_host:port/ | Default HTTP port is 8088. |
 | MapReduce JobHistory Server | http://jhs_host:port/ | Default HTTP port is 19888. |

@@ -212,7 +212,7 @@ Usage:

 Get/Set the log level for a Log identified by a qualified class name in the daemon.

-    Example: $ bin/hadoop daemonlog -setlevel 127.0.0.1:50070 org.apache.hadoop.hdfs.server.namenode.NameNode DEBUG
+    Example: $ bin/hadoop daemonlog -setlevel 127.0.0.1:9870 org.apache.hadoop.hdfs.server.namenode.NameNode DEBUG

 Files
 -----
@@ -256,15 +256,15 @@ The following settings allow configuring SSL access to the NameNode web UI (opti
 | Parameter | Value | Notes |
 |:----------|:------|:------|
 | `dfs.http.policy` | `HTTP_ONLY` or `HTTPS_ONLY` or `HTTP_AND_HTTPS` | `HTTPS_ONLY` turns off http access. This option takes precedence over the deprecated configuration dfs.https.enable and hadoop.ssl.enabled. If using SASL to authenticate data transfer protocol instead of running DataNode as root and using privileged ports, then this property must be set to `HTTPS_ONLY` to guarantee authentication of HTTP servers. (See `dfs.data.transfer.protection`.) |
-| `dfs.namenode.https-address` | `0.0.0.0:50470` | This parameter is used in non-HA mode and without federation. See [HDFS High Availability](../hadoop-hdfs/HDFSHighAvailabilityWithNFS.html#Deployment) and [HDFS Federation](../hadoop-hdfs/Federation.html#Federation_Configuration) for details. |
+| `dfs.namenode.https-address` | `0.0.0.0:9871` | This parameter is used in non-HA mode and without federation. See [HDFS High Availability](../hadoop-hdfs/HDFSHighAvailabilityWithNFS.html#Deployment) and [HDFS Federation](../hadoop-hdfs/Federation.html#Federation_Configuration) for details. |
 | `dfs.https.enable` | `true` | This value is deprecated. `Use dfs.http.policy` |

 ### Secondary NameNode

 | Parameter | Value | Notes |
 |:----------|:------|:------|
-| `dfs.namenode.secondary.http-address` | `0.0.0.0:50090` | HTTP web UI address for the Secondary NameNode. |
-| `dfs.namenode.secondary.https-address` | `0.0.0.0:50091` | HTTPS web UI address for the Secondary NameNode. |
+| `dfs.namenode.secondary.http-address` | `0.0.0.0:9868` | HTTP web UI address for the Secondary NameNode. |
+| `dfs.namenode.secondary.https-address` | `0.0.0.0:9869` | HTTPS web UI address for the Secondary NameNode. |
 | `dfs.secondary.namenode.keytab.file` | `/etc/security/keytab/sn.service.keytab` | Kerberos keytab file for the Secondary NameNode. |
 | `dfs.secondary.namenode.kerberos.principal` | `sn/_HOST@REALM.TLD` | Kerberos principal name for the Secondary NameNode. |
 | `dfs.secondary.namenode.kerberos.internal.spnego.principal` | `HTTP/_HOST@REALM.TLD` | The server principal used by the Secondary NameNode for web UI SPNEGO authentication. The SPNEGO server principal begins with the prefix `HTTP/` by convention. If the value is `'*'`, the web server will attempt to login with every principal specified in the keytab file `dfs.web.authentication.kerberos.keytab`. For most deployments this can be set to `${dfs.web.authentication.kerberos.principal}` i.e use the value of `dfs.web.authentication.kerberos.principal`. |

@@ -286,7 +286,7 @@ The following settings allow configuring SSL access to the NameNode web UI (opti
 | `dfs.datanode.data.dir.perm` | `700` | |
 | `dfs.datanode.address` | `0.0.0.0:1004` | Secure DataNode must use privileged port in order to assure that the server was started securely. This means that the server must be started via jsvc. Alternatively, this must be set to a non-privileged port if using SASL to authenticate data transfer protocol. (See `dfs.data.transfer.protection`.) |
 | `dfs.datanode.http.address` | `0.0.0.0:1006` | Secure DataNode must use privileged port in order to assure that the server was started securely. This means that the server must be started via jsvc. |
-| `dfs.datanode.https.address` | `0.0.0.0:50475` | HTTPS web UI address for the Data Node. |
+| `dfs.datanode.https.address` | `0.0.0.0:9865` | HTTPS web UI address for the Data Node. |
 | `dfs.datanode.kerberos.principal` | `dn/_HOST@REALM.TLD` | Kerberos principal name for the DataNode. |
 | `dfs.datanode.keytab.file` | `/etc/security/keytab/dn.service.keytab` | Kerberos keytab file for the DataNode. |
 | `dfs.encrypt.data.transfer` | `false` | set to `true` when using data encryption |
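
Note (not part of the commit): a minimal sketch of overriding the relocated web UI defaults programmatically. The values simply restate the new defaults from the tables above; in a real deployment they belong in hdfs-site.xml.

```java
import org.apache.hadoop.conf.Configuration;

public class SecureWebUiPorts {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // New non-ephemeral web UI defaults introduced by this change.
    conf.set("dfs.namenode.https-address", "0.0.0.0:9871");
    conf.set("dfs.namenode.secondary.http-address", "0.0.0.0:9868");
    conf.set("dfs.namenode.secondary.https-address", "0.0.0.0:9869");
    conf.set("dfs.datanode.https.address", "0.0.0.0:9865");
    System.out.println(conf.get("dfs.namenode.https-address"));
  }
}
```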
@@ -144,7 +144,7 @@ The following instructions are to run a MapReduce job locally. If you want to ex

 3.  Browse the web interface for the NameNode; by default it is available at:

-    * NameNode - `http://localhost:50070/`
+    * NameNode - `http://localhost:9870/`

 4.  Make the HDFS directories required to execute MapReduce jobs:

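
Note (not part of the guide): a hedged smoke-test sketch that probes the NameNode web UI on its new default HTTP port; the class name is illustrative.

```java
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

public class NameNodeUiCheck {
  public static void main(String[] args) throws IOException {
    // 9870 is the new default port of dfs.namenode.http-address.
    HttpURLConnection conn =
        (HttpURLConnection) new URL("http://localhost:9870/").openConnection();
    System.out.println("NameNode web UI responded: HTTP " + conn.getResponseCode());
    conn.disconnect();
  }
}
```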
@@ -63,7 +63,7 @@ You need to run the command against all servers if you want to update the config
       ID  CLASS
       1   org.apache.htrace.core.LocalFileSpanReceiver

-      $ hadoop trace -list -host 192.168.56.2:50020
+      $ hadoop trace -list -host 192.168.56.2:9867
       ID  CLASS
       1   org.apache.htrace.core.LocalFileSpanReceiver

@@ -278,7 +278,7 @@ of `getFileBlockLocations()` on a directory is []
 If the filesystem is not location aware, it SHOULD return

       [
-        BlockLocation(["localhost:50010"] ,
+        BlockLocation(["localhost:9866"] ,
           ["localhost"],
           ["/default/localhost"]
           0, F.getLen())
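
Note (not part of the spec): a hedged sketch exercising this contract from client code; the path is hypothetical. A non-location-aware filesystem should report the single synthetic location shown above.

```java
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationProbe {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus stat = fs.getFileStatus(new Path("/data/file")); // hypothetical path
    // Expect names=[localhost:9866], hosts=[localhost] from a
    // non-location-aware filesystem, per the contract above.
    for (BlockLocation loc : fs.getFileBlockLocations(stat, 0, stat.getLen())) {
      System.out.println("names=" + Arrays.toString(loc.getNames())
          + " hosts=" + Arrays.toString(loc.getHosts()));
    }
  }
}
```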
@@ -47,6 +47,6 @@ public void testDefaultURIwithOutPort() throws Exception {

   @Test
   public void testDefaultURIwithPort() throws Exception {
-    testDefaultUriInternal("hdfs://dummyhost:8020");
+    testDefaultUriInternal("hdfs://dummyhost:9820");
   }
 }
@@ -38,7 +38,7 @@ public class TestSshFenceByTcpPort {
   private static String TEST_FENCING_HOST = System.getProperty(
       "test.TestSshFenceByTcpPort.host", "localhost");
   private static final String TEST_FENCING_PORT = System.getProperty(
-      "test.TestSshFenceByTcpPort.port", "8020");
+      "test.TestSshFenceByTcpPort.port", "9820");
   private static final String TEST_KEYFILE = System.getProperty(
       "test.TestSshFenceByTcpPort.key");

@@ -65,12 +65,12 @@ public interface HdfsClientConfigKeys {

   String PREFIX = "dfs.client.";
   String DFS_NAMESERVICES = "dfs.nameservices";
-  int DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;
+  int DFS_NAMENODE_HTTP_PORT_DEFAULT = 9870;
   String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
-  int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 50470;
+  int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 9871;
   String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
   String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
-  int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
+  int DFS_NAMENODE_RPC_PORT_DEFAULT = 9820;
   String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
       "dfs.namenode.kerberos.principal";
   String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
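
Note (not part of the commit; names are illustrative): a minimal sketch of the fallback these constants drive, where an hdfs:// URI without an explicit port resolves to the new RPC default.

```java
import java.net.URI;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class DefaultRpcPortDemo {
  public static void main(String[] args) {
    URI uri = URI.create("hdfs://dummyhost"); // no port given
    int port = uri.getPort() != -1
        ? uri.getPort()
        : HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT; // 9820 after this change
    System.out.println("NameNode RPC port: " + port);
  }
}
```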