MHA High Availability for MySQL
1. Prepare four virtual machines

Role      Hostname      IP Address      Description
Master    mha-node1     192.168.8.11    Primary database
Slave1    mha-node2     192.168.8.12    Replica (candidate master)
Slave2    mha-node3     192.168.8.13    Replica
Manager   mha-manager   192.168.8.14    MHA management node (separate server)
2. Base configuration (run on every node)
# Disable the firewall
systemctl stop firewalld && systemctl disable firewalld
# Disable SELinux (setenforce takes effect immediately; the sed makes it persist across reboots)
setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# Configure /etc/hosts (identical on every node)
cat >> /etc/hosts << EOF
192.168.8.11 mha-node1
192.168.8.12 mha-node2
192.168.8.13 mha-node3
192.168.8.14 mha-manager
EOF
# Set up passwordless SSH (the manager must reach every database node without a password, and the database nodes need mutual trust). Run on every node:
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
for host in mha-node1 mha-node2 mha-node3; do
ssh-copy-id -i ~/.ssh/id_rsa.pub $host
done
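To confirm the key exchange worked, each node can be probed non-interactively (a quick sanity check using the hostnames defined in /etc/hosts above):
# Each command should print the remote hostname without asking for a password
for host in mha-node1 mha-node2 mha-node3; do
ssh -o BatchMode=yes $host hostname
done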
3. Install MySQL (using the provided script)
cd /usr/local/src
wget http://192.168.56.200/Software/mysql_install.sh
bash mysql_install.sh
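The script's internals aren't shown here; assuming it installs MySQL under /usr/local/mysql as a systemd service named mysqld (both referenced later in this guide), a quick check is:
# Verify the service is up and the server answers
systemctl status mysqld --no-pager
mysql -uroot -p -e "SELECT VERSION();"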
4. Configure the master (mha-node1)
vim /etc/my.cnf
[mysqld]
# ...
# Append below the default settings:
server-id=11                  # unique ID
log_bin=mysql-bin             # enable the binlog
binlog_format=ROW             # ROW format (recommended for MHA)
gtid_mode=ON                  # enable GTID
enforce_gtid_consistency=ON   # enforce GTID consistency
log_slave_updates=ON          # log replicated transactions to the binlog (needed for cascading replication)
skip_name_resolve=ON          # skip DNS resolution
systemctl restart mysqld      # restart to apply the changes
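After the restart, it's worth confirming the binlog and GTID settings took effect:
# Expect log_bin=ON, gtid_mode=ON, binlog_format=ROW
mysql -uroot -p -e "SHOW VARIABLES WHERE Variable_name IN ('log_bin','gtid_mode','binlog_format');"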
5. Configure the slaves (mha-node2 / mha-node3)
# mha-node2 (server-id=12)
vim /etc/my.cnf
[mysqld]
# ...
server-id=12                  # unique ID (use 13 on mha-node3)
log_bin=mysql-bin
binlog_format=ROW
gtid_mode=ON
enforce_gtid_consistency=ON
log_slave_updates=ON
skip_name_resolve=ON
relay_log=relay-bin           # enable the relay log
read_only=ON                  # make the replica read-only (optional)
systemctl restart mysqld      # restart to apply the changes
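A quick check that each replica picked up its own ID and the read-only flag:
# On mha-node2 this should print 12 and 1; on mha-node3, 13 and 1
mysql -uroot -p -e "SELECT @@server_id, @@read_only;"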
6. Set up master-slave replication
Create the replication user on the master (mha-node1):
-- MySQL 8.0 defaults to the caching_sha2_password plugin; MHA needs mysql_native_password
CREATE USER 'repl'@'192.168.8.%' IDENTIFIED WITH mysql_native_password BY 'Repl@123';
GRANT REPLICATION SLAVE ON *.* TO 'repl'@'192.168.8.%';
FLUSH PRIVILEGES;
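The new account can be tested over the network before wiring up replication (Repl@123 is the password set above):
# Run on mha-node2 or mha-node3; a result of 1 means the grant works
mysql -urepl -p'Repl@123' -h mha-node1 -e "SELECT 1;"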
Configure replication on each slave (mha-node2 / mha-node3):
-- Log in to MySQL on the slave
CHANGE MASTER TO
MASTER_HOST='mha-node1',
MASTER_USER='repl',
MASTER_PASSWORD='Repl@123',
MASTER_PORT=3306,
MASTER_AUTO_POSITION=1; -- automatic positioning via GTID
-- Start replication
START SLAVE;
-- Check replication status (both Slave_IO_Running and Slave_SQL_Running must be Yes)
SHOW SLAVE STATUS\G
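From the shell, the two thread states can be pulled out directly:
# Both lines should read Yes
mysql -uroot -p -e "SHOW SLAVE STATUS\G" | grep -E "Slave_(IO|SQL)_Running:"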
7. Install dependencies (all nodes)
# Configure the local Yum repository
vim /etc/yum.repos.d/CentOS-Media.repo
[c7-media]
name=CentOS-$releasever - Media
baseurl=file:///media/CentOS/
file:///media/cdrom/
file:///media/cdrecorder/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
# Mount the installation DVD
mkdir -p /media/cdrom
mount -r /dev/sr0 /media/cdrom
# Install wget
yum install -y wget
# Configure the network Yum repository
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
# Configure the EPEL repository
wget -O /etc/yum.repos.d/epel.repo https://mirrors.aliyun.com/repo/epel-7.repo
# Install the Perl dependencies (MHA is written in Perl)
yum install -y perl-DBD-MySQL perl-CPAN perl-Config-Tiny perl-Log-Dispatch perl-Parallel-ForkManager perl-ExtUtils-CBuilder perl-ExtUtils-MakeMaker
yum install -y perl-Email-Sender perl-Email-Valid perl-Mail-Sender
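A one-liner can confirm the core modules actually load (the module names correspond to the packages installed above):
# Prints OK on success; a missing module aborts with an error
perl -e 'use DBD::mysql; use Config::Tiny; use Log::Dispatch; use Parallel::ForkManager; print "Perl dependencies OK\n";'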
8. Download the MHA packages
# Manager node only
wget http://192.168.56.200/Software/mha4mysql-manager-0.58.tar.gz
# All nodes
wget http://192.168.56.200/Software/mha4mysql-node-0.58.tar.gz
9. Install MHA Node (required on every node, manager included)
tar -zxvf mha4mysql-node-0.58.tar.gz
cd mha4mysql-node-0.58
perl Makefile.PL
make && make install
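The node package installs a set of helper scripts; checking that they landed on the PATH is a quick smoke test:
# Both utilities are shipped by mha4mysql-node
which save_binary_logs purge_relay_logs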
10. Install MHA Manager (manager node only)
tar -zxvf mha4mysql-manager-0.58.tar.gz
cd mha4mysql-manager-0.58
perl Makefile.PL
make && make install
# Create the MHA working directories
mkdir -p /etc/mha/mha_cluster /var/log/mha/mha_cluster
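As with the node package, the manager's commands should now resolve:
# All of these are shipped by mha4mysql-manager
which masterha_manager masterha_check_ssh masterha_check_repl masterha_check_status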
11. Configure MHA
Create the MHA configuration file (manager node):
cat > /etc/mha/mha_cluster.cnf << EOF
[server default]
user=mha_user
password=Mha@123
ssh_user=root
repl_user=repl
repl_password=Repl@123
ping_interval=1
master_binlog_dir=/usr/local/mysql/data
remote_workdir=/tmp
secondary_check_script=masterha_secondary_check -s mha-node2 -s mha-node3
manager_workdir=/var/log/mha/mha_cluster
manager_log=/var/log/mha/mha_cluster/manager.log
[server1]
hostname=mha-node1
port=3306
candidate_master=0
[server2]
hostname=mha-node2
port=3306
candidate_master=1
check_repl_delay=0
[server3]
hostname=mha-node3
port=3306
candidate_master=0
EOF
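The file embeds plaintext passwords, so tightening its permissions is a sensible precaution:
# Readable by root only
chmod 600 /etc/mha/mha_cluster.cnf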
12. Create the MHA management user (on every MySQL node)
CREATE USER 'mha_user'@'192.168.8.%' IDENTIFIED WITH mysql_native_password BY 'Mha@123';
GRANT ALL PRIVILEGES ON *.* TO 'mha_user'@'192.168.8.%';
FLUSH PRIVILEGES;
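The manager's connectivity can be verified before handing things over to MHA (Mha@123 is the password set above):
# Run from mha-manager against each database node
for host in mha-node1 mha-node2 mha-node3; do
mysql -umha_user -p'Mha@123' -h $host -e "SELECT @@hostname;"
done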
13. Verify the MHA configuration
① Check SSH connectivity (manager node)
masterha_check_ssh --conf=/etc/mha/mha_cluster.cnf
# "All SSH connection tests passed successfully." means everything is fine
② Check replication (manager node)
masterha_check_repl --conf=/etc/mha/mha_cluster.cnf
# "MySQL Replication Health is OK." means everything is fine
14. Start MHA Manager
# Foreground start (for testing; logs stream to the terminal)
masterha_manager --conf=/etc/mha/mha_cluster.cnf
# Background start (for production)
nohup masterha_manager --conf=/etc/mha/mha_cluster.cnf > /var/log/mha/mha_cluster/nohup.log 2>&1 &
# Check MHA status
masterha_check_status --conf=/etc/mha/mha_cluster.cnf
# "mha_cluster (pid: xxxx) is running(0:PING_OK)" means the manager is running normally
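Should the manager ever need to be brought down cleanly (for maintenance, not a failover), the companion command is:
# Gracefully stops the monitoring process
masterha_stop --conf=/etc/mha/mha_cluster.cnf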
15. Test the failover
① Simulate a master failure (on mha-node1)
systemctl stop mysqld # stop MySQL on the primary
② Watch the failover (manager-node log)
tail -f /var/log/mha/mha_cluster/manager.log
# Under normal circumstances the log will show:
# - the master failure being detected
# - mha-node2 being promoted to the new master
# - the remaining slave (mha-node3) being repointed at the new master
③ Verify the result
# On the new master (mha-node2), check its state
mysql -uroot -p -e "SELECT @@server_id, @@read_only;"
# Expect server_id=12 and read_only=0 (MHA disables read_only on promotion)
# Check replication on mha-node3
mysql -uroot -p -e "SHOW SLAVE STATUS\G"
# Master_Host should now be mha-node2 and replication should be healthy
16. Post-failover recovery
Repair the original master (mha-node1): reinstall MySQL, then configure it as a replica of the new master (mha-node2), as sketched below.
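Re-pointing the repaired node uses the same GTID-based commands as step 6 (a minimal sketch; run on mha-node1 once its MySQL is back up):
mysql -uroot -p << 'SQL'
CHANGE MASTER TO
  MASTER_HOST='mha-node2',
  MASTER_USER='repl',
  MASTER_PASSWORD='Repl@123',
  MASTER_PORT=3306,
  MASTER_AUTO_POSITION=1;
START SLAVE;
SQL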
Restart MHA Manager (after completing a failover, the manager process exits on its own):
nohup masterha_manager --conf=/etc/mha/mha_cluster.cnf > /var/log/mha/mha_cluster/nohup.log 2>&1 &
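One caveat: after a successful failover, MHA leaves an app-name.failover.complete marker in manager_workdir (here that would be under /var/log/mha/mha_cluster/) and refuses to run another failover for roughly eight hours while it is present. If the restart is rejected for that reason, either delete the marker or start with the --ignore_last_failover flag:
# Restart while ignoring the previous failover record (only after verifying the cluster is healthy)
nohup masterha_manager --conf=/etc/mha/mha_cluster.cnf --ignore_last_failover > /var/log/mha/mha_cluster/nohup.log 2>&1 &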