hadoop-0.20.1部署手记
| Hadoop-0.19.0 | Hadoop-0.20.1 |
| ll hadoop-0.19.0/conf/ | ll hadoop-0.20.1/conf/ |
| total 92-rw-rw-r-- 1 root root 2065 Nov 14 2008 capacity-scheduler.xml-rw-rw-r-- 1 root root 535 Nov 14 2008 configuration.xsl-rw-rw-r-- 1 root root 49456 Nov 14 2008 hadoop-default.xml-rw-rw-r-- 1 root root 2237 Nov 14 2008 hadoop-env.sh-rw-rw-r-- 1 root root 1245 Nov 14 2008 hadoop-metrics.properties-rw-rw-r-- 1 root root 178 Nov 14 2008 hadoop-site.xml-rw-rw-r-- 1 root root 2815 Nov 14 2008 log4j.properties-rw-rw-r-- 1 root root 10 Nov 14 2008 masters-rw-rw-r-- 1 root root 10 Nov 14 2008 slaves-rw-rw-r-- 1 root root 401 Nov 14 2008 sslinfo.xml.example | total 56-rw-rw-r-- 1 root root 3936 Sep 2 04:44 capacity-scheduler.xml-rw-rw-r-- 1 root root 535 Sep 2 04:44 configuration.xsl-rw-rw-r-- 1 root root 178 Sep 2 04:44 core-site.xml-rw-rw-r-- 1 root root 2237 Sep 2 04:44 hadoop-env.sh-rw-rw-r-- 1 root root 1245 Sep 2 04:44 hadoop-metrics.properties-rw-rw-r-- 1 root root 4190 Sep 2 04:44 hadoop-policy.xml-rw-rw-r-- 1 root root 178 Sep 2 04:44 hdfs-site.xml-rw-rw-r-- 1 root root 2815 Sep 2 04:44 log4j.properties-rw-rw-r-- 1 root root 178 Sep 2 04:44 mapred-site.xml-rw-rw-r-- 1 root root 10 Sep 2 04:44 masters
-rw-rw-r-- 1 root root 10 Sep 2 04:44 slaves
-rw-rw-r-- 1 root root 1243 Sep 2 04:44 ssl-client.xml.example
-rw-rw-r-- 1 root root 1195 Sep 2 04:44 ssl-server.xml.example
|
|
<!-- Hadoop 0.20 site configuration: core/HDFS/MapReduce properties for the
     single-master test cluster (namenode + jobtracker on host "hadoopm"). -->
<property>
  <name>fs.default.name</name>
  <!-- NOTE: the URI must stay on one line. Hadoop 0.20's Configuration does
       not trim whitespace inside <value>, so a line break here becomes part
       of the value and the namenode URI no longer parses. -->
  <value>hdfs://hadoopm:9000</value>
  <description>The name of the default file system. Either the literal string "local" or a host:port for DFS.</description>
</property>
<property>
  <name>mapred.job.tracker</name>
  <value>hadoopm:9001</value>
  <description>The host and port that the MapReduce job tracker runs at. If "local", then jobs are run in-process as a single map and reduce task.</description>
</property>
<property>
  <name>hadoop.tmp.dir</name>
  <!-- Base directory other temp paths (dfs.name.dir defaults etc.) derive from. -->
  <value>/disk2/hadoop/tmp</value>
  <description>A base for other temporary directories.</description>
</property>
<property>
  <name>dfs.name.dir</name>
  <value>/disk2/hadoop/filesystem/name</value>
  <description>Determines where on the local filesystem the DFS name node should store the name table. If this is a comma-delimited list of directories then the name table is replicated in all of the directories, for redundancy.</description>
</property>
<property>
  <name>dfs.data.dir</name>
  <value>/disk2/hadoop/filesystem/data</value>
  <description>Determines where on the local filesystem an DFS data node should store its blocks. If this is a comma-delimited list of directories, then data will be stored in all named directories, typically on different devices. Directories that do not exist are ignored.</description>
</property>
<property>
  <name>dfs.replication</name>
  <!-- Replication of 1 is only suitable for a single-datanode test setup. -->
  <value>1</value>
  <description>Default block replication. The actual number of replications can be specified when the file is created. The default is used if replication is not specified in create time.</description>
</property>
|
| STARTUP_MSG: Starting DataNodeSTARTUP_MSG: host = hadoop-s2/127.0.0.1STARTUP_MSG: args = []STARTUP_MSG: version = 0.20.1STARTUP_MSG: build = http://svn.apache.org/repos/asf/hadoop/common/tags/release-0.20.1-rc1 -r 810220; compiled by 'oom' on Tue Sep 1 20:55:56 UTC 2009************************************************************/2009-10-15 10:16:01,592 INFO org.apache.hadoop.ipc.Client: Retrying connect to server: hadoop-m/192.168.199.135:9000. Already tried 0 time(s).2009-10-15 10:16:02,594 INFO org.apache.hadoop.ipc.Client: Retrying connect to server: hadoop-m/192.168.199.135:9000. Already tried 1 time(s).………………………………………(省略)……………… |
| 名称节点未执行start-all.sh数据节点相关目录的情况 | 名称节点执行start-all.sh数据节点相关目录的情况 |
| [root@hadoop-s2 conf]# ll /disk2total 16drwx------ 2 hadoop hadoop 16384 Oct 13 19:53 lost+found | [root@hadoop-s2 conf]# ll /disk2total 20drwxrwxr-x 3 hadoop hadoop 4096 Oct 16 09:42 hadoopdrwx------ 2 hadoop hadoop 16384 Oct 13 19:53 lost+found |
| [hadoop@hadoop-m ~]$ hadoop dfsadmin -reportConfigured Capacity: 984509087744 (916.9 GB)Present Capacity: 933221065008 (869.13 GB)DFS Remaining: 932155265024 (868.14 GB)DFS Used: 1065799984 (1016.43 MB)DFS Used%: 0.11%
-------------------------------------------------
Datanodes available: 1 (1 total, 0 dead)
Name: 192.168.193.137:50010
Decommission Status : Normal
Configured Capacity: 984509087744 (916.9 GB)
DFS Used: 1065799984 (1016.43 MB)
Non DFS Used: 51288022736 (47.77 GB)
DFS Remaining: 932155265024(868.14 GB)
DFS Used%: 0.11%
DFS Remaining%: 94.68%
Last contact: Thu Oct 15 15:56:07 CST 2009
|
| [hadoop@hadoop-m ~]$ hadoop dfs -ls seryFound 2 items-rw-r--r-- 1 hadoop supergroup 523335680 2009-10-15 15:52 /7.0-RELEASE-amd64-disc1.iso-rw-r--r-- 1 hadoop supergroup 534177792 2009-10-15 15:52 /7.0-RELEASE-i386-disc1.iso |
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Multi-disk site configuration. Fixes applied versus the original notes:
     every property now has its opening <property> tag (the fragment was not
     well-formed XML without them), stray spaces were removed from "<value >"
     tags, and the leading space in "/ disk2/..." was removed — Hadoop would
     otherwise treat " disk2" (with a space) as part of the path. -->
<configuration>
  <property>
    <name>fs.default.name</name>
    <value>hdfs://192.168.0.135:9000</value>
    <description>The name of the default file system. Either the literal string "local" or a host:port for DFS.</description>
  </property>
  <property>
    <name>mapred.job.tracker</name>
    <value>hadoop-m:9001</value>
    <description>The host and port that the MapReduce job tracker runs at. If "local", then jobs are run in-process as a single map and reduce task.</description>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <!-- Comma-delimited list spreads temp storage across both data disks. -->
    <value>/disk2/hadoop/filesystem/tmp,/disk3/hadoop/filesystem/tmp</value>
    <description>A base for other temporary directories.</description>
  </property>
  <property>
    <name>dfs.name.dir</name>
    <!-- Name table is replicated to both disks for redundancy. -->
    <value>/disk2/hadoop/filesystem/name,/disk3/hadoop/filesystem/name</value>
    <description>Determines where on the local filesystem the DFS name node should store the name table. If this is a comma-delimited list of directories then the name table is replicated in all of the directories, for redundancy.</description>
  </property>
  <property>
    <name>dfs.data.dir</name>
    <value>/disk2/hadoop/filesystem/data,/disk3/hadoop/filesystem/data</value>
    <description>Determines where on the local filesystem an DFS data node should store its blocks. If this is a comma-delimited list of directories, then data will be stored in all named directories, typically on different devices. Directories that do not exist are ignored.</description>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>1</value>
    <description>Default block replication. The actual number of replications can be specified when the file is created. The default is used if replication is not specified in create time.</description>
  </property>
</configuration>