0: System login setup
Run this on the master:
cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
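If no key pair exists yet, and to push the key to both slaves in one step, something like the following works (a sketch; ssh-copy-id assumes password logins to spark2/spark3 are still possible):

ssh-keygen -t rsa -N '' -f $HOME/.ssh/id_rsa   # generate a passwordless key pair
for host in spark2 spark3; do
  ssh-copy-id root@"$host"                     # append id_rsa.pub to the slave's authorized_keys
done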
If you use the root user, allow root SSH logins:
sed -ri 's/^(PermitRootLogin ).*$/\1yes/' /etc/ssh/sshd_config
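Restart the SSH service so the change takes effect (the service is named ssh on Ubuntu):

service ssh restart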
Edit /etc/hosts:
127.0.0.1 localhost   # do not put spark1 here
192.168.100.25 spark1 # spark1 is the master
192.168.100.26 spark2
192.168.100.27 spark3
127.0.1.1 ubuntu
# The following lines are desirable for IPv6 capable hosts
::1 localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
If spark1 is also mapped on the first (127.0.0.1) line of /etc/hosts, the slaves will show the following error:
org.apache.hadoop.ipc.Client: Retrying connect to server: spark1/192.168.100.25:9000. Already tried 0 time(s)
Then, running the following on spark1:
ss -lnt
LISTEN 0 128 localhost:9000
shows that port 9000 is bound to the loopback address only. Remove the offending entry from /etc/hosts and restart Hadoop to fix the problem.
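A quick re-check after the restart should show the LAN address instead of loopback (expected output, assuming the hosts fix above):

ss -lnt | grep 9000   # should now show 192.168.100.25:9000, not localhost:9000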
1: Install Java
You can install it straight from apt-get:
apt-get install python-software-properties -y
add-apt-repository ppa:webupd8team/java
apt-get update
apt-get install oracle-java7-installer
Or download the tarball manually:
wget http://download.oracle.com/otn-pub/java/jdk/7u80-b15/jdk-7u80-linux-x64.tar.gz
mkdir /usr/lib/jvm
tar xvf jdk-7u80-linux-x64.tar.gz
mv jdk1.7.0_80 /usr/lib/jvm
# set up the alternatives symlinks
update-alternatives --install "/usr/bin/java" "java" "/usr/lib/jvm/jdk1.7.0_80/bin/java" 1
update-alternatives --install "/usr/bin/javac" "javac" "/usr/lib/jvm/jdk1.7.0_80/bin/javac" 1
update-alternatives --install "/usr/bin/javaws" "javaws" "/usr/lib/jvm/jdk1.7.0_80/bin/javaws" 1
update-alternatives --config java
# verify
java -version
javac -version
javaws -version
Add environment variables:
cat >> /etc/profile <<'EOF'
export JAVA_HOME=/usr/lib/jvm/jdk1.7.0_80
export PATH=$PATH:$JAVA_HOME/bin
EOF
2: Install Hadoop
tar xvf hadoop-2.7.3.tar.gz
mv hadoop-2.7.3 /usr/local/hadoop
cd /usr/local/hadoop
mkdir -p hdfs/{data,name,tmp}
Add environment variables:
cat >> /etc/profile <<'EOF'
export HADOOP_HOME=/usr/local/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
EOF
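A quick smoke test that the new variables are picked up:

source /etc/profile
hadoop version   # should report Hadoop 2.7.3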
Edit the hadoop-env.sh file:
export JAVA_HOME=/usr/lib/jvm/jdk1.7.0_80 # the only line changed
Edit the core-site.xml file:
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://spark1:9000</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/local/hadoop/hdfs/tmp</value>
  </property>
</configuration>
Edit the hdfs-site.xml file:
<configuration>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/usr/local/hadoop/hdfs/name</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/usr/local/hadoop/hdfs/data</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
</configuration>
Edit the mapred-site.xml file:
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
Edit the yarn-site.xml file:
<configuration>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>spark1</value>
  </property>
</configuration>
The full set of properties and values for the files above is documented on the official Apache Hadoop site.
Edit the masters file:
echo spark1 > masters
Edit the slaves file:
spark1
spark2
spark3
Once everything is configured, use rsync to copy the relevant directories and /etc/profile to the other nodes, as sketched below.
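A minimal sketch of that sync, assuming identical paths on every node and the root SSH access set up in step 0:

for host in spark2 spark3; do
  rsync -a /usr/local/hadoop/ "$host":/usr/local/hadoop/
  rsync -a /usr/lib/jvm/ "$host":/usr/lib/jvm/
  rsync -a /etc/profile "$host":/etc/profile
done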
Initialize the filesystem (run once, before the first start):
hadoop namenode -format
Start the Hadoop DFS:
./sbin/start-dfs.sh
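Once HDFS is up, one way to confirm that all three DataNodes have registered:

hdfs dfsadmin -report | grep 'Live datanodes'   # should report (3)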
Start YARN:
./sbin/start-yarn.sh
Check the processes on spark1:
root@spark1:/usr/local/spark/conf# jps
1699 NameNode
8856 Jps
2023 SecondaryNameNode
2344 NodeManager
1828 DataNode
2212 ResourceManager
spark2 and spark3 should show processes like the following:
root@spark2:/tmp# jps
3238 Jps
1507 DataNode
1645 NodeManager
You can also check through the web UI:
http://192.168.100.25:50070
Test Hadoop
hadoop fs -mkdir /testin
hadoop fs -put ~/str.txt /testin
cd /usr/local/hadoop
hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar wordcount /testin/str.txt testout
The output looks like this:
hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar wordcount /testin/str.txt testout
17/02/24 11:20:59 INFO client.RMProxy: Connecting to ResourceManager at spark1/192.168.100.25:8032
17/02/24 11:21:01 INFO input.FileInputFormat: Total input paths to process : 1
17/02/24 11:21:01 INFO mapreduce.JobSubmitter: number of splits:1
17/02/24 11:21:02 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1487839487040_0002
17/02/24 11:21:06 INFO impl.YarnClientImpl: Submitted application application_1487839487040_0002
17/02/24 11:21:06 INFO mapreduce.Job: The url to track the job: http://spark1:8088/proxy/application_1487839487040_0002/
17/02/24 11:21:06 INFO mapreduce.Job: Running job: job_1487839487040_0002
17/02/24 11:21:28 INFO mapreduce.Job: Job job_1487839487040_0002 running in uber mode : false
17/02/24 11:21:28 INFO mapreduce.Job:  map 0% reduce 0%
17/02/24 11:22:00 INFO mapreduce.Job:  map 100% reduce 0%
17/02/24 11:22:15 INFO mapreduce.Job:  map 100% reduce 100%
17/02/24 11:22:17 INFO mapreduce.Job: Job job_1487839487040_0002 completed successfully
17/02/24 11:22:17 INFO mapreduce.Job: Counters: 49
	File System Counters
		FILE: Number of bytes read=212115
		FILE: Number of bytes written=661449
		FILE: Number of read operations=0
		FILE: Number of large read operations=0
		FILE: Number of write operations=0
		HDFS: Number of bytes read=377966
		HDFS: Number of bytes written=154893
		HDFS: Number of read operations=6
		HDFS: Number of large read operations=0
		HDFS: Number of write operations=2
	Job Counters
		Launched map tasks=1
		Launched reduce tasks=1
		Data-local map tasks=1
		Total time spent by all maps in occupied slots (ms)=23275
		Total time spent by all reduces in occupied slots (ms)=11670
		Total time spent by all map tasks (ms)=23275
		Total time spent by all reduce tasks (ms)=11670
		Total vcore-milliseconds taken by all map tasks=23275
		Total vcore-milliseconds taken by all reduce tasks=11670
		Total megabyte-milliseconds taken by all map tasks=23833600
		Total megabyte-milliseconds taken by all reduce tasks=11950080
	Map-Reduce Framework
		Map input records=1635
		Map output records=63958
		Map output bytes=633105
		Map output materialized bytes=212115
		Input split bytes=98
		Combine input records=63958
		Combine output records=14478
		Reduce input groups=14478
		Reduce shuffle bytes=212115
		Reduce input records=14478
		Reduce output records=14478
		Spilled Records=28956
		Shuffled Maps =1
		Failed Shuffles=0
		Merged Map outputs=1
		GC time elapsed (ms)=429
		CPU time spent (ms)=10770
		Physical memory (bytes) snapshot=455565312
		Virtual memory (bytes) snapshot=1391718400
		Total committed heap usage (bytes)=277348352
	Shuffle Errors
		BAD_ID=0
		CONNECTION=0
		IO_ERROR=0
		WRONG_LENGTH=0
		WRONG_MAP=0
		WRONG_REDUCE=0
	File Input Format Counters
		Bytes Read=377868
	File Output Format Counters
		Bytes Written=154893
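To look at the actual counts, read the output file back (with no leading slash, testout resolves under the current user's HDFS home directory; part-r-00000 is the single reducer's output):

hadoop fs -cat testout/part-r-00000 | head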
3: Install Scala
tar xvf scala-2.11.8.tgz
mv scala-2.11.8 /usr/local/scala
Add environment variables:
cat >> /etc/profile <<'EOF'
export SCALA_HOME=/usr/local/scala
export PATH=$PATH:$SCALA_HOME/bin
EOF
Verify:
source /etc/profile
scala -version
Scala code runner version 2.11.8 -- Copyright 2002-2016, LAMP/EPFL
4: Install Spark
tar xvf spark-2.1.0-bin-hadoop2.7.tgz
mv spark-2.1.0-bin-hadoop2.7 /usr/local/spark
Add environment variables:
cat >> /etc/profile <<'EOF'
export SPARK_HOME=/usr/local/spark
export PATH=$PATH:$SPARK_HOME/bin
export LD_LIBRARY_PATH=$HADOOP_HOME/lib/native
EOF

Without the LD_LIBRARY_PATH line, spark-shell starts with the following warning:

NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Edit spark-env.sh:
SPARK_MASTER_HOST=spark1
HADOOP_CONF_DIR=/usr/local/hadoop/etc/hadoop
Edit slaves:
spark1
spark2
spark3
Start Spark:
./sbin/start-all.sh
jps on spark1 should now look like this, with Master and Worker added:
root@spark1:/usr/local/spark/conf# jps
1699 NameNode
8856 Jps
7774 Master
2023 SecondaryNameNode
7871 Worker
2344 NodeManager
1828 DataNode
2212 ResourceManager
spark2 and spark3 each gain a Worker:
root@spark2:/tmp# jps
3238 Jps
1507 DataNode
1645 NodeManager
3123 Worker
You can also check through the Spark master web UI:
http://192.168.100.25:8080/
Run spark-shell:
root@spark1:/usr/local/spark/conf# spark-shell
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
17/02/24 11:55:46 WARN SparkContext: Support for Java 7 is deprecated as of Spark 2.0.0
17/02/24 11:56:17 WARN ObjectStore: Failed to get database global_temp, returning NoSuchObjectException
Spark context Web UI available at http://192.168.100.25:4040
Spark context available as 'sc' (master = local[*], app id = local-1487908553475).
Spark session available as 'spark'.
Welcome to
      ____              __
     / __/__  ___ _____/ /__
    _\ \/ _ \/ _ `/ __/  '_/
   /___/ .__/\_,_/_/ /_/\_\   version 2.1.0
      /_/

Using Scala version 2.11.8 (Java HotSpot(TM) 64-Bit Server VM, Java 1.7.0_80)
Type in expressions to have them evaluated.
Type :help for more information.

scala> :help
The Spark application UI is now available as well:
http://192.168.100.25:4040/environment/
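Note that the shell above ran with master = local[*]. To run against the standalone cluster instead, pass the master URL explicitly; a small sketch that re-reads the wordcount input from HDFS (7077 is the default standalone master port):

spark-shell --master spark://spark1:7077 <<'EOF'
val lines = sc.textFile("hdfs://spark1:9000/testin/str.txt")
println(s"lines in str.txt: ${lines.count()}")
EOF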
Test Spark
run-example org.apache.spark.examples.SparkPi
17/02/28 11:17:20 INFO DAGScheduler: Job 0 finished: reduce at SparkPi.scala:38, took 3.491241 s
Pi is roughly 3.1373756868784346
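The same example can also be submitted to the standalone cluster via spark-submit (a sketch; the examples jar path is the one shipped in the Spark 2.1.0 binary distribution):

spark-submit --master spark://spark1:7077 \
  --class org.apache.spark.examples.SparkPi \
  $SPARK_HOME/examples/jars/spark-examples_2.11-2.1.0.jar 100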
That completes the setup.