Introduction to the keepalived Configuration File, with Examples

Which configuration file is read at startup

If you delete both the configuration file at the default path and the template configuration file under the compile-install directory, keepalived can still be started and stopped normally. So which configuration file does a compiled-from-source keepalived read at startup? The answer is in the options file used by the service wrapper:

[root@lvs keepalived]# cat /etc/sysconfig/keepalived 
...

KEEPALIVED_OPTIONS="-D -f /usr/local/keepalived/etc/keepalived/keepalived.conf"
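The init script sources this options file and passes KEEPALIVED_OPTIONS to the daemon, so the -f flag above decides which configuration file keepalived tries to load. A simplified sketch of what a typical SysV init script does with it (not the verbatim script):

# sketch of the relevant part of /etc/rc.d/init.d/keepalived
. /etc/rc.d/init.d/functions
. /etc/sysconfig/keepalived              # pulls in KEEPALIVED_OPTIONS

start() {
    daemon keepalived ${KEEPALIVED_OPTIONS}   # e.g. -D -f .../keepalived.conf
}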

Start-up test

[root@lvs keepalived]# service keepalived start
Starting keepalived (via systemctl):                       [  OK  ]
[root@lvs keepalived]# service keepalived status
● keepalived.service - SYSV: Start and stop Keepalived
   Loaded: loaded (/etc/rc.d/init.d/keepalived; bad; vendor preset: disabled)
   Active: active (running) since Thu 2020-09-10 17:51:54 CST; 5s ago
     Docs: man:systemd-sysv-generator(8)
  Process: 3746 ExecStart=/etc/rc.d/init.d/keepalived start (code=exited, status=0/SUCCESS)
 Main PID: 3753 (keepalived)
   CGroup: /system.slice/keepalived.service
           ├─3753 /usr/local/keepalived/sbin/keepalived -D
           ├─3755 /usr/local/keepalived/sbin/keepalived -D
           └─3756 /usr/local/keepalived/sbin/keepalived -D
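The three PIDs are expected: keepalived runs a parent process plus two child processes, one handling VRRP and one running the health checkers.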

Custom log path


[root@lvs keepalived]# mkdir /usr/local/keepalived/log
[root@lvs keepalived]# vim /etc/sysconfig/keepalived 
# --log-detail         -D    Detailed log messages.
# --log-facility       -S    0-7 Set local syslog facility (default=LOG_DAEMON)
#
KEEPALIVED_OPTIONS="-D -S 0"
# The -S 0 option tells keepalived to log via the local0 syslog facility

[root@lvs keepalived]# vim /etc/rsyslog.conf
# Save boot messages also to boot.log
local7.*                                                /var/log/boot.log
local0.*                                                /usr/local/keepalived/log/keepalived.log
# Below the local7 line, add a local0 line so that local0 facility messages are written to /usr/local/keepalived/log/keepalived.log


# Restart rsyslog, then restart keepalived; the keepalived messages now land in the dedicated log file
[root@lvs keepalived]# systemctl restart rsyslog
[root@lvs keepalived]# service keepalived restart


[root@lvs ~]# ll  /usr/local/keepalived/log/keepalived.log 
-rw------- 1 root root 3981 Sep 10 18:15 /usr/local/keepalived/log/keepalived.log


# Note: the system log (/var/log/messages) still records a copy!
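If the duplicate copy in the system log is unwanted, the catch-all rule in /etc/rsyslog.conf can exclude the local0 facility (a sketch, assuming the stock CentOS rule):

# append local0.none to the existing catch-all rule
*.info;mail.none;authpriv.none;cron.none;local0.none    /var/log/messages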

Configuration file

Note:

Before testing, eliminate interference from any ipvs rules configured manually earlier, from the ldirectord service loading its configuration file, and from the ipvsadm service loading its saved rule file.

[root@lvs keepalived]# ll /etc/sysconfig/ipvsadm
-rw-r--r-- 1 root root 0 Sep 10 17:39 /etc/sysconfig/ipvsadm
[root@lvs keepalived]# ll /etc/ha.d/ldirectord.cf 
-rw-r--r-- 1 root root 8232 Sep 10 15:31 /etc/ha.d/ldirectord.cf

# If these two files exist and contain rules, there is no need to delete them; just stop the corresponding services, disable them at boot, and clear the current ipvs rules

[root@lvs keepalived]# systemctl stop ipvsadm
[root@lvs keepalived]# systemctl disable ipvsadm
Removed symlink /etc/systemd/system/multi-user.target.wants/ipvsadm.service.

[root@lvs keepalived]# systemctl stop ldirectord
[root@lvs keepalived]# systemctl disable ldirectord

[root@lvs keepalived]# ipvsadm -C
[root@lvs keepalived]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn

Viewing the help

[root@lvs keepalived]# man keepalived.conf

Configuration file example

Below is the sample configuration file that ships with keepalived.

cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

# Global configuration section: alert email settings
global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1 # mail server
   smtp_connect_timeout 30 # SMTP connection timeout
   router_id LVS_DEVEL # identifier of this keepalived node; must be unique among keepalived nodes!
}

# VRRP protocol configuration section
# A virtual router is a highly available router formed by one master and several backups; the keepalived on each node runs a VRRP instance.
# The VRRP instance below declares:
# - this instance plays the MASTER role
# - VRRP advertisements are sent on interface eth0
# - its virtual router id is 51; all instances with router id 51 together form one virtual router
# - its priority in the master election is 100; priority outranks the state setting: even if this node is declared MASTER, a peer with priority 120 wins the election and this node becomes a backup
# - the advertisement interval is 1 second
# - the member authentication password is 1111
# - three virtual IPs are defined

vrrp_instance VI_1 {
    state MASTER # this node is the master
    interface eth0 
    virtual_router_id 51 # virtual router id; all keepalived instances in the same virtual router must use the same id
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.200.16
        192.168.200.17
        192.168.200.18
    }
    
    track_interface { # interfaces to monitor; if any listed interface, e.g. eth1, goes down, keepalived enters the FAULT state
        eth0
        eth1
        eth...
    }
}
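To make the election comments above concrete, here is a minimal sketch of the matching configuration on a BACKUP peer: same virtual_router_id, password, and VIPs, with only state and priority differing (the values are illustrative):

vrrp_instance VI_1 {
    state BACKUP            # declared as backup
    interface eth0
    virtual_router_id 51    # must match the master's id
    priority 80             # lower than the master's 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111      # must match the master's password
    }
    virtual_ipaddress {
        192.168.200.16
        192.168.200.17
        192.168.200.18
    }
}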


# Virtual server configuration section, equivalent to the -A vip:port part of an ipvsadm command.
# It defines:
# - the VIP and the service port
# - an RS health-check interval of 6 seconds
# - the rr (round robin) scheduling algorithm
# - the NAT forwarding mode for ipvs
# - a 24-bit VIP netmask
# - session persistence with a 50-second timeout
# roughly equivalent to: ipvsadm -A -t 192.168.200.100:443 -s rr -p 50 (NAT mode is set per real server, with -m)
virtual_server 192.168.200.100 443 {
    delay_loop 6
    lb_algo rr
    lb_kind NAT
    nat_mask 255.255.255.0
    persistence_timeout 50
    protocol TCP
   
# One RS (real server) is defined behind this VS.
# It defines:
# - the RS IP and port
# - weight 1
# - health checks via HTTPS GET requests
# - two check URLs
# - health is judged by comparing each page's hash against the configured digest; status_code can be used instead
# roughly equivalent to: ipvsadm -a -t 192.168.200.100:443 -r 192.168.201.100:443 -m -w 1
# - connection timeout 3 seconds
# - after 3 consecutive failures the RS is considered down
# - wait 3 seconds after each failed check before retrying, so ignoring timeouts it takes 9 seconds to learn that an RS is down
    real_server 192.168.201.100 443 {
        weight 1
        SSL_GET {
            url {
              path /
              digest ff20ad2481f97b1754ef3e12ecd3a9cc
            }
            url {
              path /mrtg/
              digest 9b3a0c85a887a256d6939da88aabd8cd
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
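Once keepalived is running with this virtual_server section, the rules it programmed can be checked with ipvsadm. A sketch of the expected output, assuming the RS passes its health check:

[root@lvs keepalived]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.200.100:443 rr persistent 50
  -> 192.168.201.100:443          Masq    1      0          0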

# A second ipvs virtual service is defined on 10.10.10.2 port 1358:
# - with two real servers behind it
# - health-checked via HTTP GET requests
# - a sorry_server 192.168.200.200:1358 is defined, which takes over when all RSes are down
virtual_server 10.10.10.2 1358 {
    delay_loop 6
    lb_algo rr 
    lb_kind NAT
    persistence_timeout 50
    protocol TCP

    sorry_server 192.168.200.200 1358

    real_server 192.168.200.2 1358 {
        weight 1
        HTTP_GET {
            url { 
              path /testurl/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url { 
              path /testurl2/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url { 
              path /testurl3/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }

    real_server 192.168.200.3 1358 {
        weight 1
        HTTP_GET {
            url { 
              path /testurl/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334c
            }
            url { 
              path /testurl2/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334c
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

keepalived state-change notification scripts

keepalived can be configured to invoke a custom notification script whenever its state changes; typically the script sends a notification email. There are three transitions to handle: becoming master, becoming backup, and entering the fault state.

Example

# Defined inside the vrrp_instance configuration section

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.200.16
        192.168.200.17
        192.168.200.18
    }
    
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
}

# Lab example: add the following three lines to the vrrp_instance configuration section;
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.80.200/24 dev eth0 label eth0:1
    }



    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
}

# Create the script as follows; normally it would send a notification email, but here it simply creates a marker file
[root@host2 ~]# cat /etc/keepalived/notify.sh 
#!/bin/bash

# create a marker file named after the new state ($1)
notify() {
    touch /tmp/$1
}

case $1 in
    master)
        notify master
        ;;
    backup)
        notify backup
        ;;
    fault)
        notify fault
        ;;
    *)
        echo "usage: $(basename $0) (master|backup|fault)"
        exit 1
        ;;
esac
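The script must be executable for keepalived to be able to run it; a quick manual test (a sketch):

[root@host2 ~]# chmod +x /etc/keepalived/notify.sh
[root@host2 ~]# /etc/keepalived/notify.sh master
[root@host2 ~]# ls /tmp/master
/tmp/master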

RS state-change notification scripts

Inside each RS (real_server) configuration section, notification scripts can be defined for RS state changes.

For details, see: https://keepalived.org/manpage.html

Example

# Defined inside the real_server configuration section
real_server <IPADDR> <PORT> {
    weight <INT>                              # RS weight
    notify_up <STRING>|<QUOTED-STRING>        # script run when the RS comes up
    notify_down <STRING>|<QUOTED-STRING>      # script run when the RS goes down
    HTTP_GET|SSL_GET|TCP_CHECK|SMTP_CHECK|MISC_CHECK { ... }   # health-check method for this RS
}
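A minimal concrete sketch (the IP, port, and notification script path here are hypothetical, not taken from the examples above):

real_server 192.168.80.101 80 {
    weight 1
    notify_up "/etc/keepalived/rs_notify.sh up"      # hypothetical script path
    notify_down "/etc/keepalived/rs_notify.sh down"  # hypothetical script path
    TCP_CHECK {
        connect_timeout 3
    }
}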
