keepalived-haproxy Load Balancing

xlong
2024-03-24

keepalived

Installing keepalived with Docker

# https://github.com/osixia/docker-keepalived

# load the ipvs kernel module
modprobe ip_vs
# lsmod | grep ip_vs

# master node
docker run --name keepalived01 --cap-add=NET_ADMIN --cap-add=NET_BROADCAST --cap-add=NET_RAW --net=host -e KEEPALIVED_INTERFACE=ens32 -e KEEPALIVED_STATE=MASTER -e KEEPALIVED_VIRTUAL_IPS=192.168.1.241 -d osixia/keepalived:2.0.20

# backup node
docker run --name keepalived02 --cap-add=NET_ADMIN --cap-add=NET_BROADCAST --cap-add=NET_RAW --net=host -e KEEPALIVED_INTERFACE=ens32 -e KEEPALIVED_STATE=BACKUP -e KEEPALIVED_VIRTUAL_IPS=192.168.1.241 -d osixia/keepalived:2.0.20

# cleanup
# docker rm -f keepalived01
# ip addr del 192.168.1.241/32 dev ens32

keepalived has two operating modes, preemptive and non-preemptive:

Preemptive mode (preempt): the MASTER is chosen by priority; the node with the higher priority becomes MASTER and holds the virtual IP. The problem with this mode is that when the original master recovers from a failure it reclaims the MASTER role and takes the virtual IP back, which can cause trouble in some scenarios (for example, when data has to be synchronized before the recovered node should serve traffic again).

Non-preemptive mode (nopreempt): a switchover only happens when keepalived itself fails; otherwise nothing changes. (A health-check script that merely lowers the priority will not trigger a master/backup switch.)
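A quick way to see which node currently holds the virtual IP (and to watch preemption happen once a recovered master takes it back) is to check the interface on each node; the interface and VIP below match the Docker example above and need adjusting for other setups:

# run on each node; the one that prints the address currently holds the VIP
ip addr show dev ens32 | grep 192.168.1.241

# the osixia/keepalived container logs also show MASTER/BACKUP transitions
docker logs --tail 20 keepalived01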

Master node keepalived:

# cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
    router_id 192.168.100.61
    script_user root
    enable_script_security
}

vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh"
    interval 2
    weight -20
    rise 1
    fall 1 
}

vrrp_instance VI_1 {
    state MASTER
    interface ens224
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 192.168.100.61
    #nopreempt

    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
         chk_nginx
    }
    virtual_ipaddress {
        192.168.100.100
    }
}

Backup node keepalived:

# cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived
global_defs {
    router_id 192.168.100.62
    script_user root
    enable_script_security
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh"
    interval 2
    weight -20
    rise 1
    fall 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens224
    virtual_router_id 251
    mcast_src_ip 192.168.100.62
    priority 90
    #nopreempt
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        192.168.100.100
    }
}
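With these two instances, a failing chk_nginx check subtracts the weight from the master's priority: 100 - 20 = 80, which drops below the backup's 90, so the backup takes over the VIP. A minimal way to watch the failover, assuming the check target is the service probed by check_port.sh in the next section:

# on the master node: stop whatever answers https://192.168.100.61:443 so chk_nginx starts failing

# on the backup node: the VIP should appear within a few advertisement intervals
watch -n1 "ip addr show dev ens224 | grep 192.168.100.100"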

Health check script:

[root@harporxy01 nlb]# cat /etc/keepalived/check_port.sh
#!/bin/bash
# exit 0 (healthy) if the local service answers on 443, otherwise exit 1
if curl -k https://192.168.100.61:443 >/dev/null 2>&1; then
    exit 0
else
    exit 1
fi
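The script can be exercised by hand before keepalived relies on it; an exit code of 0 means the check passes:

chmod +x /etc/keepalived/check_port.sh
/etc/keepalived/check_port.sh; echo "exit code: $?"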

haproxy

docker-compose file

version: '3'

services:
  haproxy_4layer:
    image: haproxy:2.2.31
    ports:
      - 9000:9000
      - 443:443
    environment:
      - TZ=Asia/Shanghai      
    volumes:
      - ./data/haproxy_4layer.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
      - ./data/haproxy_crt:/usr/local/etc/haproxy/crt
    restart: always

Self-signed certificate:

mkdir -p /app/data/haproxy_crt
cd /app/data/haproxy_crt
# generate a self-signed certificate
openssl req -newkey rsa:2048 -nodes -x509 -days 365 -keyout haproxy.key -out haproxy.crt -subj "/CN=example.com" 
cat haproxy.crt haproxy.key >> haproxy.pem
chmod 755 haproxy.pem 
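The combined pem can be inspected with openssl before handing it to haproxy, and once the container is up an s_client handshake against port 443 confirms the certificate is actually being served (self-signed, so verification is skipped):

# check subject and validity of the bundle
openssl x509 -in haproxy.pem -noout -subject -dates

# after haproxy is running, confirm the TLS handshake
openssl s_client -connect 127.0.0.1:443 -servername example.com </dev/null 2>/dev/null | head -n 5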

haproxy configuration:

[root@harporxy01 nlb]# cat data/haproxy_4layer.cfg 
global
  maxconn 4096
  log /dev/log local0
  log /dev/log local1 notice

defaults
  log global
  mode tcp
  option tcplog
  option tcp-check
  timeout connect 5000
  timeout client 50000
  timeout server 50000

frontend frontend_4layer01
  bind *:443 ssl crt /usr/local/etc/haproxy/crt/haproxy.pem
  mode tcp
#  tcp-request inspect-delay 5s
#  tcp-request content accept if { req_ssl_hello_type 1 }
#  use_backend backend_mysql if { req_ssl_sni -m end .mysql.example.com }
  # http-request rules only take effect in http mode and are ignored by this tcp frontend;
  # the bind line above already terminates TLS, so no redirect is needed here
  # http-request redirect scheme https code 301 if !{ ssl_fc }
  default_backend backend_nginx

frontend frontend_4layer02
  bind *:9000 # ssl crt /usr/local/etc/haproxy/crt/haproxy.pem
  mode tcp
  default_backend backend_mysql

backend backend_mysql
  mode tcp
  balance roundrobin
  server haproxy-lb-1 192.168.100.63:9000 check
  server haproxy-lb-2 192.168.100.64:9000 check

backend backend_nginx
  mode tcp
  balance roundrobin
  server nginx-lb-1 192.168.100.63:8080 check
  server nginx-lb-2 192.168.100.64:8080 check
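Before (re)starting the stack, the configuration can be syntax-checked with the same image that will run it; haproxy -c only validates the file and exits:

docker run --rm -v $PWD/data/haproxy_4layer.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro \
  haproxy:2.2.31 haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg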

lb4and7

[root@harporxy03 lb4and7]# cat lb4and701 
version: '3'

services:
  haproxy:
    image: haproxy:latest
    ports:
      - 9000:1080
    environment:
      - TZ=Asia/Shanghai      
    volumes:
      - ./data/haproxylb4.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
    restart: always

  nginx:
    image: nginx:latest
    ports:
      - 8080:80
    volumes:
      - ./data/default.conf:/etc/nginx/conf.d/default.conf:ro
    restart: always

nginx layer-7 load balancing configuration

[root@harporxy03 lb4and7]# cat data/default.conf 
upstream backend {
    server 192.168.100.65:8081 weight=5;
    server 192.168.100.66:8081 weight=5;
}

server {
    listen       80;
    listen  [::]:80;
    server_name  localhost;

    location / {
        proxy_pass http://backend;
    }
}
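A quick smoke test from the lb4and7 host itself, hitting the published port 8080 a few times, makes the upstream round-robin visible (this assumes the nginx-web backends described further down are already answering on 8081):

for i in 1 2 3 4; do curl -s http://127.0.0.1:8080/; done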

haproxy layer-4 load balancing configuration

[root@harporxy03 lb4and7]# cat data/haproxylb4.cfg 
global
  maxconn 4096

defaults
  mode tcp
  option tcplog
  option tcp-check
  timeout connect 5000
  timeout client 50000
  timeout server 50000

frontend frontend_4layer
  bind *:1080
  default_backend backend_mysql

backend backend_mysql
  balance roundrobin
  server mysql1 192.168.100.65:8306 check
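This frontend listens on 1080 inside the container, which the compose file above publishes as 9000 on the host, and passes the TCP stream straight through to MySQL on 192.168.100.65:8306. Assuming a mysql client is installed on the host (and remote root logins are allowed, as the official image configures by default), the pass-through can be verified directly:

mysql -h 127.0.0.1 -P 9000 -uroot -p123456 -e 'select @@server_id;'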

nginx web: simulated backend service

[root@web01 web]# cat nginx-web 
version: '3'

services:
  nginx_web:
    image: nginx:latest
    ports:
      - 8081:80
    volumes:
      - ./data/default.conf:/etc/nginx/conf.d/default.conf:ro
      - ./data/html:/usr/share/nginx/html
    restart: always

[root@web01 web]# cat data/default.conf 
server {
    listen       80;
    listen  [::]:80;
    server_name  localhost;
    location / {
        root   /usr/share/nginx/html;
        index  index.html index.htm;
    }
    error_page   500 502 503 504  /50x.html;
    location = /50x.html {
        root   /usr/share/nginx/html;
    }
}
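To make the load balancing visible end to end, each backend can serve a page that identifies it; a request to the keepalived VIP then travels VIP -> haproxy (443, TLS) -> nginx layer-7 LB (8080) -> nginx web (8081). A sketch, assuming the same directory layout on web01 and web02:

# on web01 and web02 respectively
mkdir -p data/html && echo "hello from $(hostname)" > data/html/index.html

# from any client
curl -ks https://192.168.100.100/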

MySQL service

master:

[root@web01 mysql]# cat mysql-master 
version: '3.3'
services:
  # master node service
  mysql-master:
    # container name
    container_name: mysql-master
    # mysql image
    image: mysql:5.7.31
    restart: always
    # port mapping: host port : container port
    ports:
      - 8306:3306
    privileged: true
    # mount logs, config file and data by path
    volumes:
      - $PWD/msql-master/volumes/log:/var/log/mysql
      - $PWD/msql-master/volumes/conf/my.cnf:/etc/mysql/my.cnf
      - $PWD/msql-master/volumes/data:/var/lib/mysql
    # environment variable: mysql root password
    environment:
      MYSQL_ROOT_PASSWORD: "123456"
    command: [
        '--character-set-server=utf8mb4',
        '--collation-server=utf8mb4_general_ci',
        '--max_connections=3000'
    ]
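Because the compose file is not named docker-compose.yml, it has to be passed explicitly with -f; the same applies to the slave file below:

docker-compose -f mysql-master up -d
docker-compose -f mysql-master ps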

mysql-master configuration file:

[root@web01 mysql]# cat msql-master/volumes/conf/my.cnf 
[mysqld]
# server ID, defaults to 1; the last octet of the node's IP is commonly used
server-id=1

# [required] enable the binary log
log-bin=mysql-bin

# replication filter: databases that are not replicated (the mysql schema is usually excluded)
binlog-ignore-db=mysql

# flush the binlog to disk after every write
sync_binlog = 1

# skip all replication errors and keep replicating
slave-skip-errors = all

# To replicate only specific databases, use binlog_do_db = <database name>;
# add one such line per database if there are several.
# If no database is specified, all databases are replicated except those listed in binlog-ignore-db.
# binlog_do_db = test   # replicate only the test database

slave:

[root@web02 mysql]# cat mysql-slave 
version: '3.3'
services:
  # slave node service
  mysql-slave:
    container_name: mysql-slave
    image: mysql:5.7.31
    restart: always
    ports:
      - 8307:3306
    privileged: true
    volumes:
      - $PWD/msql-slave/volumes/log:/var/log/mysql
      - $PWD/msql-slave/volumes/conf/my.cnf:/etc/mysql/my.cnf
      - $PWD/msql-slave/volumes/data:/var/lib/mysql
    environment:
      MYSQL_ROOT_PASSWORD: "123456"
    command: [
        '--character-set-server=utf8mb4',
        '--collation-server=utf8mb4_general_ci',
        '--max_connections=3000'
    ]

mysql-slave configuration file:

[root@web02 mysql]# cat msql-slave/volumes/conf/my.cnf 
[mysqld]
# server ID, must differ from the master; the last octet of the node's IP is commonly used
server-id=2

# For a chained master -> slave(/master) -> slave topology, also set:
# log-slave-updates   only with this option are changes received from the upstream server written to this server's binlog and passed on.

# To replicate only specific databases, set replicate-do-db = <database name> on the slave
# (the master does not need to restrict databases).
# If omitted, all databases are replicated except those ignored below.
# replicate-do-db = test

# do not replicate the mysql schema; several schemas can be listed, e.g. replicate-ignore-db = mysql,information_schema
replicate-ignore-db=mysql

## enable the binary log so this slave can act as a master for other slaves
log-bin=mysql-bin
log-bin-index=mysql-bin.index

## relay log configuration
#relay_log=edu-mysql-relay-bin

## optionally set a log retention period:
#expire_logs_days=14

# skip all replication errors and keep replicating
slave-skip-errors = all

Enter the mysql-master container:

docker exec -it mysql-master bash

mysql -uroot -p123456

# check that server_id took effect
mysql> show variables like '%server_id%';
+----------------+-------+
| Variable_name  | Value |
+----------------+-------+
| server_id      | 1     |
| server_id_bits | 32    |
+----------------+-------+

# show master status; the File and Position values are needed on the slave
mysql> show master status;
+------------------+----------+--------------+------------------+-------------------+
| File             | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set |
+------------------+----------+--------------+------------------+-------------------+
| mysql-bin.000005 |      154 |              | mysql            |                   |
+------------------+----------+--------------+------------------+-------------------+
1 row in set (0.00 sec)

# create the replication user
mysql> create user 'slaver'@'%' identified by '123456';
# grant replication privileges
mysql> grant replication slave,replication client on *.* to 'slaver'@'%';
mysql> flush privileges;
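Before configuring the slave, it is worth confirming from the slave host that the slaver account can reach the master through the published port 8306 (assumes a mysql client is available there):

mysql -h 192.168.100.65 -P 8306 -uslaver -p123456 -e 'select 1;'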

Enter the mysql-slave container:

docker exec -it mysql-slave bash

mysql -uroot -p123456

# check that server_id took effect
mysql> show variables like '%server_id%';
+----------------+-------+
| Variable_name  | Value |
+----------------+-------+
| server_id      | 2     |
| server_id_bits | 32    |
+----------------+-------+

# point the slave at the master; master_log_file and master_log_pos must match
# the File and Position values from "show master status" on the master
change master to master_host='192.168.100.65',master_user='slaver',master_password='123456',master_port=8306,master_log_file='mysql-bin.000005',master_log_pos=154,master_connect_retry=30;

# start replication
mysql> start slave;

# make the slave read-only
SHOW VARIABLES LIKE '%read_only%'; # check the current read-only state
SET GLOBAL super_read_only=1; # read-only for users with SUPER privilege: 1 = read-only, 0 = writable
SET GLOBAL read_only=1; # read-only for normal users: 1 = read-only, 0 = writable
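Replication health can then be checked on the slave; both the IO and SQL threads should report Yes, and Seconds_Behind_Master should be a number rather than NULL:

docker exec mysql-slave mysql -uroot -p123456 -e 'show slave status\G' | grep -E 'Running|Seconds_Behind_Master'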

