OpenStack HA集群4-Haproxy
1、安装haproxy
# yum install -y haproxy
# systemctl enable haproxy.service
2、配置haproxy日志
[root@controller1 ~]# cd /etc/rsyslog.d/
[root@controller1 rsyslog.d]# vim haproxy.conf
$ModLoad imudp
$UDPServerRun 514
$template Haproxy,"%msg%"
local0.=info -/var/log/haproxy.log;Haproxy
local0.notice -/var/log/haproxy-status.log;Haproxy
local0.* ~
[root@controller1 ~]# scp /etc/rsyslog.d/haproxy.conf controller2:/etc/rsyslog.d/
haproxy.conf 100% 164 0.2KB/s 00:00
[root@controller1 ~]# scp /etc/rsyslog.d/haproxy.conf controller3:/etc/rsyslog.d/
启动rsyslog
[root@controller1 ~]# systemctl restart rsyslog.service
[root@controller1 ~]# systemctl status rsyslog.service
配置haproxy
[root@controller1 ~]# vim /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
log 127.0.0.1 local0
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
stats uri /haproxy-stats
#---------------------------------------------------------------------
# main frontend which proxies to the backends
#---------------------------------------------------------------------
listen haproxy-status
bind *:80
option http-server-close
option forwardfor
default_backend httpd
maxconn 10
stats refresh 30s
stats hide-version
stats auth admin:admin
stats uri /haproxy-stats
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
listen httpd
balance roundrobin
mode http
option httplog
# option httpchk HEAD / HTTP/1.1\r\nHost:\ localhost
# option httpchk GET /index.html
server controller1 controller1:8080 check
server controller2 controller2:8080 check
server controller3 controller3:8080 check
listen mysql
balance roundrobin
mode tcp
option tcplog
server controller1 controller1:3306 check
server controller2 controller2:3306 check
server controller3 controller3:3306 check
listen keystone_api_cluster
bind 192.168.17.132:5000
balance source
option tcpka
option httpchk
option tcplog
server controller1 controller1:5000 check inter 2000 rise 2 fall 5
server controller2 controller2:5000 check inter 2000 rise 2 fall 5
server controller3 controller3:5000 check inter 2000 rise 2 fall 5
listen cinder_api_cluster
bind 192.168.17.132:8776
balance source
option tcpka
option httpchk
option tcplog
server controller1 controller1:8778 check inter 2000 rise 2 fall 5
server controller2 controller2:8778 check inter 2000 rise 2 fall 5
server controller3 controller3:8778 check inter 2000 rise 2 fall 5
listen nova_vncproxy_cluster
bind 192.168.17.132:6080
balance source
option tcpka
option httpchk
option tcplog
server controller1 controller1:6081 check inter 2000 rise 2 fall 5
server controller2 controller2:6081 check inter 2000 rise 2 fall 5
server controller3 controller3:6081 check inter 2000 rise 2 fall 5
listen neutron_api_cluster
bind 192.168.17.132:9696
balance source
option tcpka
option httpchk
option tcplog
server controller1 controller1:9797 check inter 2000 rise 2 fall 5
server controller2 controller2:9797 check inter 2000 rise 2 fall 5
server controller3 controller3:9797 check inter 2000 rise 2 fall 5
[root@controller1 ~]# scp /etc/haproxy/haproxy.cfg controller2:/etc/haproxy/
[root@controller1 ~]# scp /etc/haproxy/haproxy.cfg controller3:/etc/haproxy/
[root@controller1 ~]# systemctl restart haproxy
[root@controller1 ~]# systemctl status haproxy
本文转自 OpenStack2015 博客,原文链接: http://blog.51cto.com/andyliu/1917398 如需转载请自行联系原作者

低调大师中文资讯倾力打造互联网数据资讯、行业资源、电子商务、移动互联网、网络营销平台。
持续更新报道IT业界、互联网、市场资讯、驱动更新,是最及时权威的产业资讯及硬件资讯报道平台。
转载内容版权归作者及来源网站所有,本站原创内容转载请注明来源。
- 上一篇
安全组规则跨region导入导出功能介绍
功能介绍 您可以在控制台clone一个安全组,进而快速创建安全组和添加安全组规则,但这个功能有个限制是无法跨region使用。为此ECS控制台开发了安全组规则导入导出功能,使用这个功能您可以把一个安全组下所有规则导出成json文件,这样就可以方便地在另外一个region的安全组内导入这些规则,进而实现跨region快速设置安全组规则的目的。 使用方法 导出规则 找到要导出规则的安全组并进入规则配置详情页 点“导出全部规则”按钮完成规则导出,截图如下: 导出成功后,会在您本地看到保存当前安全组所有规则的json文件,此文件命名规则如下:ecs_${region_id}_${安全组ID}.json 假设region_id是cn-qingdao, 安全组ID是sg-123,那么文件名是 ecs_cn-qingdao_sg-123.json 导入规则
- 下一篇
hadoop cluster decommission node (下线节点,超级实用)
一、描述 为了节约成本,避免资源浪费,下线集群中的一个节点,也就是把一台云主机回收喽。 centos 6.6_64bit hadoop 2.6.0 二、操作步骤(动态下线) 下线节点主机名如下,在hadoop用户下操作,配置文件都在conf目录下 host-10-10-10-10 ##一看就在云上是不是 1.在conf目录下创建文件 touchexcludes echo "host-10-10-10-10" > exclude less exclude ##要有验证 2.修改配置文件hdfs-site.conf vi hdfs-site.xml 添加如下内容,路径根据自己的实际情况 <property> <name>dfs.hosts.exclude</name> <value>/usr/local/RoilandGroup/hadoop-2.6.0/etc/hadoop/excludes</value> </property> 3.修改配置文件 yarn-site.conf 添加如下内容,路径根据自...
相关文章
文章评论
共有0条评论来说两句吧...
文章二维码
点击排行
推荐阅读
最新文章
- Hadoop3单机部署,实现最简伪集群
- MySQL8.0.19开启GTID主从同步CentOS8
- Springboot2将连接池hikari替换为druid,体验最强大的数据库连接池
- Docker快速安装Oracle11G,搭建oracle11g学习环境
- SpringBoot2更换Tomcat为Jetty,小型站点的福音
- Docker安装Oracle12C,快速搭建Oracle学习环境
- CentOS关闭SELinux安全模块
- CentOS7,8上快速安装Gitea,搭建Git服务器
- CentOS8安装Docker,最新的服务器搭配容器使用
- SpringBoot2配置默认Tomcat设置,开启更多高级功能