Keepalived实现LVS高可用
环境准备
环境说明:LVS-DR模式
client1:eth0->192.168.88.10
lvs1:eth0->192.168.88.5
lvs2:eth0->192.168.88.6
web1:eth0->192.168.88.100
web2:eth0->192.168.88.200
配置高可用、负载均衡
1.在2台web服务器的lo上配置vip
2.在2台web服务器上配置内核参数
#临时先做修改
sysctl -w net.ipv4.conf.all.arp_announce=2
sysctl -w net.ipv4.conf.eth0.arp_announce=2
sysctl -w net.ipv4.conf.all.arp_ignore=1
sysctl -w net.ipv4.conf.eth0.arp_ignore=1
#修改/etc/sysctl.conf内核配置文件
net.ipv4.conf.all.arp_announce = 2
net.ipv4.conf.eth0.arp_announce = 2
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.eth0.arp_ignore = 1
通过Ansible两台LVS主机上配置keepalived
#ansible配置文件
[root@pubserver cluster]# vim ansible.cfg
[defaults]
host_key_checking = false
inventory=inventory
#配置主机清单文件
[root@pubserver cluster]# vim inventory
...略...
[lb]
lvs1 ansible_host=192.168.88.5
lvs2 ansible_host=192.168.88.6
#书写yml安装软件包
[root@pubserver cluster]# vim 11-install-lvs2.yml
---
- name: install lvs keepalived
hosts: lb
tasks:
- name: install pkgs # 安装软件包
yum:
name: ipvsadm,keepalived
state: present
#配置/etc/keepalived/keepalived.conf,两台LVS都需要配置;另一台(主)改为 state MASTER、priority 100,并把 router_id 改为该机的唯一标识(如 lvs2)
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id lvs1 #为本机取一个唯一的ID
vrrp_iptables #自动开启iptables放行规则
vrrp_skip_check_adv_addr
vrrp_strict
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_instance VI_1 {
state BACKUP #状态,主为MASTER,备为BACKUP
interface eth0 #网卡名称
virtual_router_id 51 #虚拟路由地址,同一个keepalived集群须相同
priority 80 #优先级
advert_int 1 #发送心跳消息间隔
authentication {
auth_type PASS #认证类型为共享密码
auth_pass 1111 #集群中的机器密码相同,方可成为集群
}
virtual_ipaddress {
192.168.88.15/24 #VIP地址
}
}
virtual_server 192.168.88.15 80 {
delay_loop 6 #健康检查延迟6秒
lb_algo wrr #调度算法为WRR
lb_kind DR #工作模式为DR
#persistence_timeout 50 #50秒内相同客户端调度到相同服务器
protocol TCP #协议为TCP
real_server 192.168.88.100 80 { #声明真实服务器
weight 1 #权重
TCP_CHECK { #通过TCP协议对真实服务器做健康检查
connect_timeout 3 #连接超时时间为3秒
nb_get_retry 3 #3次访问失败认为真实服务器故障
delay_before_retry 3 #两次检查时间的间隔为3秒
}
}
real_server 192.168.88.200 80 {
weight 2
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
#修改完成后在两台LVS启动keepalived服务
[root@lvs1 ~]# systemctl start keepalived
# 验证
[root@lvs1 ~]# ip a s eth0 | grep 88
inet 192.168.88.5/24 brd 192.168.88.255 scope global noprefixroute eth0
inet 192.168.88.15/32 scope global eth0
[root@lvs1 ~]# ipvsadm -Ln # 出现规则
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.88.15:80 wrr
-> 192.168.88.100:80 Route 1 0 0
-> 192.168.88.200:80 Route 2 0 0
# 在客户端验证
[root@client1 ~]# for i in {1..6}; do curl http://192.168.88.15/; done
Welcome from web2
Welcome from web1
Welcome from web2
Welcome from web2
Welcome from web1
Welcome from web2
# 1. 验证真实服务器健康检查
[root@web1 ~]# systemctl stop nginx
[root@lvs1 ~]# ipvsadm -Ln # web1在规则中消失
[root@lvs2 ~]# ipvsadm -Ln
[root@web1 ~]# systemctl start nginx
[root@lvs1 ~]# ipvsadm -Ln # web1重新出现在规则中
[root@lvs2 ~]# ipvsadm -Ln
到这里,Keepalived 实现 LVS 高可用的配置与验证就全部完成了:两台 LVS 通过 VRRP 互为主备接管 VIP,并依靠 TCP 健康检查自动剔除/恢复故障的真实服务器。