$ vim /data/hadoop/etc/hadoop/yarn-site.xml
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<configuration>
<property>
<name>yarn.acl.enable</name>
<value>true</value>
</property>
<property>
<name>yarn.admin.acl</name>
<value>*</value>
</property>
<!-- Enable log aggregation -->
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<!-- How long (seconds) aggregated logs are kept on HDFS; 259200 = 3 days -->
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>259200</value>
</property>
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>hadoop-test</value>
</property>
<!-- Retry interval (ms) for reconnecting after losing contact with the RM -->
<property>
<name>yarn.resourcemanager.connect.retry-interval.ms</name>
<value>2000</value>
</property>
<!-- To run MapReduce jobs, each NodeManager must load the shuffle server at startup. The shuffle server is in fact a Jetty/Netty server that Reduce tasks use to remotely copy the intermediate output produced by Map tasks on each NodeManager. The property below enables that shuffle service. -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- Enable ResourceManager HA (disabled by default) -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.ha.automatic-failover.embedded</name>
<value>true</value>
</property>
<!-- Logical IDs of the RMs -->
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<!-- ZooKeeper quorum for HA, comma-separated; used by the ZKFailoverController for automatic failover -->
<property>
<name>ha.zookeeper.quorum</name>
<value>192.168.233.17:2181,192.168.233.238:2181,192.168.233.157:2181</value>
</property>
<!-- Enable automatic failover -->
<property>
<name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<!-- Hostnames of the RMs -->
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>192.168.233.65</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>192.168.233.94</value>
</property>
<!-- ZooKeeper quorum used to store RM state -->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>192.168.233.17:2181,192.168.233.238:2181,192.168.233.157:2181</value>
</property>
<!-- Enable automatic recovery: if the RM dies while applications are running, they are recovered instead of lost (default is false) -->
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<!-- Host:port addresses of the ZooKeeper servers used by the RM state store, comma-separated for multiple servers -->
<property>
<name>yarn.resourcemanager.zk-state-store.address</name>
<value>192.168.233.17:2181,192.168.233.238:2181,192.168.233.157:2181</value>
</property>
<!-- Store ResourceManager state in the ZooKeeper ensemble instead of the default FileSystem store -->
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<!-- How long (ms) the MapReduce ApplicationMaster waits between attempts to reconnect to a lost scheduler -->
<property>
<name>yarn.app.mapreduce.am.scheduler.connection.wait.interval-ms</name>
<value>5000</value>
</property>
<!-- rm1 addresses -->
<!-- Address clients use to submit applications to the RM -->
<property>
<name>yarn.resourcemanager.address.rm1</name>
<value>192.168.233.65:8132</value>
</property>
<!-- Scheduler address the RM exposes to ApplicationMasters, used to request and release resources -->
<property>
<name>yarn.resourcemanager.scheduler.address.rm1</name>
<value>192.168.233.65:8130</value>
</property>
<!-- RM web UI address for viewing cluster information -->
<property>
<name>yarn.resourcemanager.webapp.address.rm1</name>
<value>192.168.233.65:8188</value>
</property>
<!-- Address NodeManagers use to exchange information with the RM -->
<property>
<name>yarn.resourcemanager.resource-tracker.address.rm1</name>
<value>192.168.233.65:8131</value>
</property>
<!-- Address administrators use to send management commands to the RM -->
<property>
<name>yarn.resourcemanager.admin.address.rm1</name>
<value>192.168.233.65:8033</value>
</property>
<property>
<name>yarn.resourcemanager.ha.admin.address.rm1</name>
<value>192.168.233.65:23142</value>
</property>
<!-- rm2 addresses -->
<property>
<name>yarn.resourcemanager.address.rm2</name>
<value>192.168.233.94:8132</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address.rm2</name>
<value>192.168.233.94:8130</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rm2</name>
<value>192.168.233.94:8188</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address.rm2</name>
<value>192.168.233.94:8131</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address.rm2</name>
<value>192.168.233.94:8033</value>
</property>
<property>
<name>yarn.resourcemanager.ha.admin.address.rm2</name>
<value>192.168.233.94:23142</value>
</property>
<!-- Resource configuration -->
<property>
<name>yarn.scheduler.fair.preemption</name>
<value>true</value>
<description>Enable resource preemption (default is false)</description>
</property>
<!-- Whether to use the user name as the queue name when an application does not specify one. If false or unset, applications targeting unknown queues are submitted to the default queue. Default is true. -->
<property>
<name>yarn.scheduler.fair.user-as-default-queue</name>
<value>true</value>
<description>Default is true</description>
</property>
<!-- Whether undeclared pools may be created. If true, YARN automatically creates any pool named by a job but not defined here; if false, such jobs are placed in the default pool instead. Default is true. -->
<property>
<name>yarn.scheduler.fair.allow-undeclared-pools</name>
<value>false</value>
</property>
<!-- Minimum physical memory (MB) a single container may request; default is 1024. Requests below this value are rounded up to it. -->
<property>
<name>yarn.scheduler.minimum-allocation-mb</name>
<value>512</value>
</property>
<!-- Maximum physical memory (MB) a single container may request; default is 8192. By default YARN uses thread-based monitoring to detect tasks that exceed their memory allowance and kills them when they do. Cgroup memory control is inflexible (a task may never exceed its limit at any instant, or it is killed or OOMs), whereas a Java process momentarily doubles its memory when forking before dropping back to normal; thread-based monitoring can treat such a momentary spike as normal instead of killing the task, which is why YARN does not provide cgroup-based memory isolation. -->
<property>
<name>yarn.scheduler.maximum-allocation-mb</name>
<value>4096</value>
</property>
<property>
<name>yarn.scheduler.minimum-allocation-vcores</name>
<value>1</value>
</property>
<property>
<name>yarn.scheduler.maximum-allocation-vcores</name>
<value>4</value>
</property>
<property>
<name>yarn.scheduler.increment-allocation-vcores</name>
<value>1</value>
</property>
<property>
<name>yarn.scheduler.increment-allocation-mb</name>
<value>512</value>
</property>
<!-- Maximum number of ApplicationMaster attempts for a single application -->
<property>
<name>yarn.resourcemanager.am.max-attempts</name>
<value>2</value>
</property>
<property>
<name>yarn.resourcemanager.container.liveness-monitor.interval-ms</name>
<value>600000</value>
</property>
<property>
<name>yarn.resourcemanager.nm.liveness-monitor.interval-ms</name>
<value>1000</value>
</property>
<property>
<name>yarn.nm.liveness-monitor.expiry-interval-ms</name>
<value>600000</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.client.thread-count</name>
<value>50</value>
</property>
<!-- Total physical memory (MB) YARN may use on this node; default is 8192. Lower this value if the node has less than 8 GB of memory. -->
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>6000</value>
<description>Memory available per node, in MB</description>
</property>
<!-- Number of virtual CPU cores YARN may use on this node; default is 8. Setting it equal to the number of physical cores is currently recommended; lower it if the node has fewer than 8 cores. -->
<property>
<name>yarn.nodemanager.resource.cpu-vcores</name>
<value>2</value>
</property>
<property>
<name>yarn.nodemanager.pmem-check-enabled</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.vmem-check-enabled</name>
<value>false</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
</property>
<!-- Maximum number of completed applications the RM retains -->
<property>
<name>yarn.resourcemanager.max-completed-applications</name>
<value>10000</value>
</property>
<!-- Failover proxy provider: the class clients use to find the active RM by polling the configured RMs -->
<property>
<name>yarn.client.failover-proxy-provider</name>
<value>org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider</value>
</property>
<property>
<name>yarn.resourcemanager.ha.automatic-failover.zk-base-path</name>
<value>/yarn-leader-election</value>
</property>
</configuration>
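After the file is distributed and both ResourceManagers are started, the HA state can be checked with yarn rmadmin, using the rm1/rm2 IDs defined above; one RM should report active and the other standby:
$ yarn rmadmin -getServiceState rm1
$ yarn rmadmin -getServiceState rm2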
$ vim /data/hadoop/etc/hadoop/fair-scheduler.xml
$ cat /data/hadoop/etc/hadoop/fair-scheduler.xml
<?xml version="1.0"?>
<allocations>
<userMaxAppsDefault>30</userMaxAppsDefault>
<queue name="root">
<!-- Minimum resources -->
<minResources>5120mb,5vcores</minResources>
<!-- Maximum resources -->
<maxResources>29000mb,10vcores</maxResources>
<maxRunningApps>100</maxRunningApps>
<weight>1.0</weight>
<schedulingMode>DRF</schedulingMode>
<!-- Users and groups allowed to submit applications -->
<aclSubmitApps> </aclSubmitApps>
<!-- Users and groups allowed to administer applications -->
<aclAdministerApps> </aclAdministerApps>
<queue name="users" type="parent">
<minResources>10000mb,2vcores</minResources>
<maxResources>15000mb,6vcores</maxResources>
<maxRunningApps>50</maxRunningApps>
<weight>3</weight>
<schedulingPolicy>fair</schedulingPolicy>
<aclSubmitApps>hadoop,hdfs</aclSubmitApps>
<aclAdministerApps>hadoop</aclAdministerApps>
</queue>
<queue name="default" type="parent">
<minResources>1000mb,1vcores</minResources>
<maxResources>2000mb,2vcores</maxResources>
<maxRunningApps>50</maxRunningApps>
<weight>3</weight>
<schedulingPolicy>fair</schedulingPolicy>
<aclSubmitApps>hadoop</aclSubmitApps>
<aclAdministerApps>hadoop</aclAdministerApps>
</queue>
<queue name="prod">
<minResources>1000mb,1vcores</minResources>
<maxResources>10000mb,4vcores</maxResources>
<maxRunningApps>50</maxRunningApps>
<weight>3</weight>
<schedulingPolicy>fair</schedulingPolicy>
<aclSubmitApps>hadoop,hdfs</aclSubmitApps>
<aclAdministerApps>hadoop</aclAdministerApps>
</queue>
</queue>
<queueMaxResourcesDefault>20000mb,16vcores</queueMaxResourcesDefault>
<queuePlacementPolicy>
<rule name="specified" />
<rule name="primaryGroup" create="false" />
<rule name="nestedUserQueue">
<rule name="secondaryGroupExistingQueue" create="false" />
</rule>
<rule name="default" queue="users"/>
</queuePlacementPolicy>
</allocations>
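The Fair Scheduler periodically reloads fair-scheduler.xml on its own, so the pools can be verified without restarting YARN, for example via the scheduler REST endpoint on the active RM (8188 is the webapp port configured above):
$ curl http://192.168.233.65:8188/ws/v1/cluster/scheduler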
Test the prod pool:
$ spark-shell --master yarn --queue prod --executor-memory 1000m --executor-cores 1
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
Spark context Web UI available at http://hadoop-test-1:4040
Spark context available as 'sc' (master = yarn, app id = application_1592814747219_0002).
Spark session available as 'spark'.
Welcome to
____ __
/ __/__ ___ _____/ /__
_\ \/ _ \/ _ `/ __/ '_/
/___/ .__/\_,_/_/ /_/\_\ version 2.4.6
/_/
Using Scala version 2.11.12 (Java HotSpot(TM) 64-Bit Server VM, Java 1.8.0_231)
Type in expressions to have them evaluated.
Type :help for more information.
scala>
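To double-check which queue the shell landed in, the queue name can be read back from the Spark configuration (--queue is propagated as the spark.yarn.queue key, so this should print prod):
scala> sc.getConf.get("spark.yarn.queue")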
Test the users parent pool:
$ spark-shell --master yarn --queue root.users.hadoop --executor-memory 3000m --executor-cores 3
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
Spark context Web UI available at http://hadoop-test-2:4040
Spark context available as 'sc' (master = yarn, app id = application_1592814747219_0003).
Spark session available as 'spark'.
Welcome to
____ __
/ __/__ ___ _____/ /__
_\ \/ _ \/ _ `/ __/ '_/
/___/ .__/\_,_/_/ /_/\_\ version 2.4.6
/_/
Using Scala version 2.11.12 (Java HotSpot(TM) 64-Bit Server VM, Java 1.8.0_231)
Type in expressions to have them evaluated.
Type :help for more information.
scala>
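While the shells are running, the applications and the queues they were assigned to can also be listed from another terminal; the two sessions above should appear under root.prod and root.users.hadoop respectively:
$ yarn application -list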