
How to Start Spark

Fixing: Spark fails to start when running the startup scripts under Spark's sbin directory.

[root@hadoop7 sbin]# ./start-all.sh
./start-all.sh: line 29: /root/install/spark-2.4.0-bin-hadoop2.7/sbin/spark-config.sh: No such file or directory
./start-all.sh: line 32: /root/install/spark-2.4.0-bin-hadoop2.7/sbin/start-master.sh: No such file or directory
./start-all.sh: line 35: /root/install/spark-2.4.0-bin-hadoop2.7/sbin/start-slaves.sh: No such file or directory
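
Note that the failing paths point at spark-2.4.0-bin-hadoop2.7, while the installation used in the rest of this session is spark-2.4.5-bin-hadoop2.7: a stale SPARK_HOME left over from the old install would make start-all.sh look in the wrong directory and produce exactly these errors. A quick check (a sketch; where SPARK_HOME is exported is an assumption, /etc/profile and ~/.bashrc are common spots):

[root@hadoop7 ~]# echo $SPARK_HOME
/root/install/spark-2.4.0-bin-hadoop2.7        # stale value (illustrative)
[root@hadoop7 ~]# grep SPARK_HOME /etc/profile ~/.bashrc
[root@hadoop7 ~]# export SPARK_HOME=/root/install/spark-2.4.5-bin-hadoop2.7   # point it at the 2.4.5 install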

First, start Hadoop and ZooKeeper:

[root@hadoop7 ~]# jps
1357 Jps
[root@hadoop7 ~]# start-all.sh 
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [hadoop7]
hadoop7: starting namenode, logging to /root/install/hadoop-2.7.7/logs/hadoop-root-namenode-hadoop7.out
localhost: starting datanode, logging to /root/install/hadoop-2.7.7/logs/hadoop-root-datanode-hadoop7.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /root/install/hadoop-2.7.7/logs/hadoop-root-secondarynamenode-hadoop7.out
starting yarn daemons
starting resourcemanager, logging to /root/install/hadoop-2.7.7/logs/yarn-root-resourcemanager-hadoop7.out
localhost: starting nodemanager, logging to /root/install/hadoop-2.7.7/logs/yarn-root-nodemanager-hadoop7.out
[root@hadoop7 ~]# jps
2850 Jps
1763 DataNode
2515 NodeManager
1564 NameNode
2268 ResourceManager
2014 SecondaryNameNode
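
As the warning above notes, Hadoop's start-all.sh is deprecated; the same set of daemons can be started with the two scripts it recommends:

[root@hadoop7 ~]# start-dfs.sh     # NameNode, DataNode, SecondaryNameNode
[root@hadoop7 ~]# start-yarn.sh    # ResourceManager, NodeManager
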
[root@hadoop7 ~]# zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /root/install/zookeeper-3.4.14/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
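
STARTED only means the process launched; to confirm ZooKeeper is actually serving, check its status (the Mode line below assumes a single-node setup):

[root@hadoop7 ~]# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /root/install/zookeeper-3.4.14/bin/../conf/zoo.cfg
Mode: standalone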

Next, fix the Spark configuration: spark-env.sh still referenced the old hostname.

[root@hadoop7 spark-2.4.5-bin-hadoop2.7]# cd conf/
[root@hadoop7 conf]# ls
docker.properties.template  hive-site.xml              metrics.properties.template  slaves.template               spark-env.sh
fairscheduler.xml.template  log4j.properties.template  slaves                       spark-defaults.conf.template  spark-env.sh.template
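
The slaves file (already instantiated from slaves.template here) lists the hosts on which Workers are launched. Judging from the "localhost: starting ... Worker" line at the end of this session, it presumably contains just localhost:

[root@hadoop7 conf]# cat slaves
localhost
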
[root@hadoop7 conf]# vi spark-env.sh   # changed hadoop6 to hadoop7 in this file
[root@hadoop7 conf]# 
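
For reference, a minimal spark-env.sh for this layout might look like the following. The only change actually made in the session is hadoop6 → hadoop7; the other variables and paths are assumptions:

export JAVA_HOME=/root/install/jdk1.8.0_201                   # hypothetical JDK path
export HADOOP_CONF_DIR=/root/install/hadoop-2.7.7/etc/hadoop  # lets Spark find the HDFS/YARN configs
export SPARK_MASTER_HOST=hadoop7                              # was hadoop6; must match this machine's hostname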
[root@hadoop7 conf]# cd ..
[root@hadoop7 spark-2.4.5-bin-hadoop2.7]# cd logs/
[root@hadoop7 logs]# ls
spark-root-org.apache.spark.deploy.master.Master-1-hadoop4.out    spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop4.out
spark-root-org.apache.spark.deploy.master.Master-1-hadoop4.out.1  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop4.out.1
spark-root-org.apache.spark.deploy.master.Master-1-hadoop4.out.2  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop4.out.2
spark-root-org.apache.spark.deploy.master.Master-1-hadoop4.out.3  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop4.out.3
spark-root-org.apache.spark.deploy.master.Master-1-hadoop4.out.4  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out    spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out.1
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out.1  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out.2
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out.2  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out.3
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out.3  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out.4
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out.4  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out.5
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out.5  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop6.out
spark-root-org.apache.spark.deploy.master.Master-1-hadoop6.out    spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop6.out.1
spark-root-org.apache.spark.deploy.master.Master-1-hadoop6.out.1

The stale logs above are from the machine's earlier hostnames (hadoop4, hadoop5, hadoop6); clearing them makes the next startup's output easy to find:

[root@hadoop7 logs]# rm -rf *
[root@hadoop7 logs]# cd ..
[root@hadoop7 spark-2.4.5-bin-hadoop2.7]# cd sbin/
[root@hadoop7 sbin]# ls
slaves.sh         start-all.sh               start-mesos-shuffle-service.sh  start-thriftserver.sh   stop-mesos-dispatcher.sh       stop-slaves.sh
spark-config.sh   start-history-server.sh    start-shuffle-service.sh        stop-all.sh             stop-mesos-shuffle-service.sh  stop-thriftserver.sh
spark-daemon.sh   start-master.sh            start-slave.sh                  stop-history-server.sh  stop-shuffle-service.sh
spark-daemons.sh  start-mesos-dispatcher.sh  start-slaves.sh                 stop-master.sh          stop-slave.sh
[root@hadoop7 sbin]# ./start-all.sh 
starting org.apache.spark.deploy.master.Master, logging to /root/install/spark-2.4.5-bin-hadoop2.7/logs/spark-root-org.apache.spark.deploy.master.Master-1-hadoop7.out
localhost: starting org.apache.spark.deploy.worker.Worker, logging to /root/install/spark-2.4.5-bin-hadoop2.7/logs/spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop7.out

Spark now starts successfully.
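
To verify, jps should now show Master and Worker processes alongside the Hadoop daemons (PIDs below are illustrative):

[root@hadoop7 sbin]# jps
3112 Master
3268 Worker

The standalone master's web UI is served at http://hadoop7:8080 by default.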
 
