Kubernetes 1.20 Binary High-Availability Deployment: Scaling Out to Multiple Masters (HA Architecture) (Part 7)

凤凰涅槃, published 2022/12/05 12:32:37

8. Scaling Out to Multiple Masters (HA Architecture)

8.1 Deploying the Master2 Node

Master2 is configured in exactly the same way as the already-deployed Master1, so we only need to copy all of Master1's K8s files over, change the server IP and hostname in the configuration, and start the services.

On the master1 node


#Install Docker on Master2 (copy the binaries and configuration from Master1)

[root@k8s-master1 cfg]# scp /usr/bin/docker* 192.168.1.23:/usr/bin
[root@k8s-master1 cfg]# scp /usr/bin/runc 192.168.1.23:/usr/bin
[root@k8s-master1 cfg]# scp /usr/bin/containerd* 192.168.1.23:/usr/bin
[root@k8s-master1 cfg]# scp /usr/lib/systemd/system/docker.service 192.168.1.23:/usr/lib/systemd/system
[root@k8s-master1 cfg]# scp -r /etc/docker 192.168.1.23:/etc

On the master2 node


# Start Docker on Master2
systemctl daemon-reload
systemctl enable docker
systemctl start docker
#Create the etcd certificate directory on Master2
[root@k8s-master2 ~]# mkdir -p /opt/etcd/ssl

On the master1 node


#Copy all K8s files and etcd certificates from Master1 to Master2
[root@k8s-master1 cfg]# scp -r /opt/kubernetes 192.168.1.23:/opt
[root@k8s-master1 cfg]# scp -r /opt/etcd/ssl 192.168.1.23:/opt/etcd
[root@k8s-master1 cfg]# scp /usr/lib/systemd/system/kube* 192.168.1.23:/usr/lib/systemd/system
[root@k8s-master1 cfg]# scp /usr/bin/kubectl  192.168.1.23:/usr/bin
[root@k8s-master1 cfg]# scp -r ~/.kube 192.168.1.23:~

On the master2 node


#Delete the kubelet certificate and kubeconfig file (these were generated automatically when Master1's kubelet registered and are unique per node, so they must be removed and re-issued)
[root@k8s-master2 ~]# rm -f /opt/kubernetes/cfg/kubelet.kubeconfig
[root@k8s-master2 ~]# rm -f /opt/kubernetes/ssl/kubelet*


#Update the server IP and hostname in the configuration files
*Point the apiserver, kubelet and kube-proxy configuration files (and the copied kubeconfigs) at the local IP
[root@k8s-master2 ~]# vim /opt/kubernetes/cfg/kube-apiserver.conf
--bind-address=192.168.1.23
--advertise-address=192.168.1.23

[root@k8s-master2 ~]# vim /opt/kubernetes/cfg/kube-controller-manager.kubeconfig
server: https://192.168.1.23:6443

[root@k8s-master2 ~]# vim /opt/kubernetes/cfg/kube-scheduler.kubeconfig
server: https://192.168.1.23:6443

[root@k8s-master2 ~]# vim /opt/kubernetes/cfg/bootstrap.kubeconfig
server: https://192.168.1.23:6443

[root@k8s-master2 ~]# vim /opt/kubernetes/cfg/kube-proxy.kubeconfig
server: https://192.168.1.23:6443

[root@k8s-master2 ~]# vim /opt/kubernetes/cfg/kubelet.conf
--hostname-override=k8s-master2

[root@k8s-master2 ~]# vim /opt/kubernetes/cfg/kube-proxy-config.yml
hostnameOverride: k8s-master2

[root@k8s-master2 ~]# vi ~/.kube/config
server: https://192.168.1.23:6443
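
The kubeconfig server entries above can also be rewritten in one pass instead of editing each file by hand. This is only a sketch and assumes the files copied from Master1 still reference https://192.168.1.20:6443; the kube-apiserver.conf addresses and the two hostname overrides still need the manual edits shown above.

#Batch-update only the apiserver endpoint in the kubeconfig files (leaves the etcd endpoints in kube-apiserver.conf untouched)
[root@k8s-master2 ~]# sed -i 's#https://192.168.1.20:6443#https://192.168.1.23:6443#g' /opt/kubernetes/cfg/*.kubeconfig ~/.kube/config
[root@k8s-master2 ~]# grep -l '192.168.1.20:6443' /opt/kubernetes/cfg/*.kubeconfig ~/.kube/config || echo "no stale apiserver endpoints"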


#Start the services and enable them at boot
systemctl daemon-reload
systemctl enable kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy
systemctl start kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy
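
To confirm everything on Master2 came up cleanly, a quick status loop over the five services can help (a simple sketch using systemctl; inspect any failures with journalctl -u <name>):

#Each service should print "active"
[root@k8s-master2 ~]# for svc in kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy; do echo -n "$svc: "; systemctl is-active $svc; done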

Verify and join the cluster

#Check the cluster status
[root@k8s-master2 ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-1               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}
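
The deprecation warning is expected on v1.19+. If you prefer not to rely on ComponentStatus, the apiserver health endpoints report the same information (a sketch using standard kubectl raw requests):

#Aggregated health of the apiserver and its dependencies, including etcd
[root@k8s-master2 ~]# kubectl get --raw='/readyz?verbose'
[root@k8s-master2 ~]# kubectl get --raw='/livez?verbose'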


#Approve the kubelet certificate request

*View the certificate requests
[root@k8s-master2 ~]# kubectl get csr
NAME                                                   AGE     SIGNERNAME                                    REQUESTOR           CONDITION
node-csr--okVOhcnMlwp9j2L64uYd6HKiopCDU1FQf9Ywj_EUhw   2m55s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
node-csr-BQJv1fS07fW1u4uPTID4M0ybNJV60Br71DjA67DmJxk   64m     kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
node-csr-xQsFeNF5nlB-rZQr2cIxFB18ET3kAGYHSC9GMSKDoI8   73m     kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued

*Approve the request
[root@k8s-master2 ~]# kubectl certificate approve node-csr--okVOhcnMlwp9j2L64uYd6HKiopCDU1FQf9Ywj_EUhw
certificatesigningrequest.certificates.k8s.io/node-csr--okVOhcnMlwp9j2L64uYd6HKiopCDU1FQf9Ywj_EUhw approved
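
If more than one kubelet is waiting, the pending requests can be approved in one pass instead of one at a time (a sketch; it assumes the CONDITION column is the last field of kubectl get csr output, as above):

#Approve every CSR that is still Pending
[root@k8s-master2 ~]# kubectl get csr --no-headers | awk '$NF=="Pending" {print $1}' | xargs -r kubectl certificate approve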


# View the nodes
[root@k8s-master2 ~]# kubectl get node
NAME          STATUS     ROLES    AGE    VERSION
k8s-master1   Ready      <none>   113m   v1.20.4
k8s-master2   NotReady   <none>   54s    v1.20.4
k8s-node1     Ready      <none>   73m    v1.20.4
k8s-node2     Ready      <none>   65m    v1.20.4


[root@k8s-master1 cfg]# kubectl get node
NAME          STATUS     ROLES    AGE    VERSION
k8s-master1   Ready      <none>   113m   v1.20.4
k8s-master2   NotReady   <none>   42s    v1.20.4
k8s-node1     Ready      <none>   73m    v1.20.4
k8s-node2     Ready      <none>   65m    v1.20.4

k8s-master2 shows NotReady because Calico is still pulling and initializing its networking images on the new node.

Waiting…
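
While waiting, the progress can be followed directly (a sketch; the k8s-app=calico-node label assumes the stock calico.yaml manifest used earlier in this series):

#Watch the calico-node pod being created and pulling images on k8s-master2
[root@k8s-master2 ~]# kubectl get pod -n kube-system -l k8s-app=calico-node -o wide -w
#Or inspect why the node is still NotReady
[root@k8s-master2 ~]# kubectl describe node k8s-master2 | grep -A 8 Conditions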

Final result

[root@k8s-master2 ~]# kubectl get pod -n kube-system
NAME                                      READY   STATUS    RESTARTS   AGE
calico-kube-controllers-97769f7c7-9d49d   1/1     Running   0          107m
calico-node-8djzj                         1/1     Running   0          107m
calico-node-bkdm6                         1/1     Running   0          8m2s
calico-node-h6ghf                         1/1     Running   0          80m
calico-node-nj9l7                         1/1     Running   0          72m
coredns-6d8f96d957-kzn2g                  1/1     Running   0          47m



[root@k8s-master2 ~]# kubectl get node
NAME          STATUS   ROLES    AGE    VERSION
k8s-master1   Ready    <none>   120m   v1.20.4
k8s-master2   Ready    <none>   8m6s   v1.20.4
k8s-node1     Ready    <none>   80m    v1.20.4
k8s-node2     Ready    <none>   72m    v1.20.4

8.2 Deploying the Nginx + Keepalived High-Availability Load Balancer

Install the packages
On the master1 and master2 nodes



#Install the packages (on both the primary and the backup)
[root@k8s-master1 cfg]# yum -y install epel-release nginx keepalived

Nginx
On the master1 and master2 nodes


#Nginx configuration file (identical on primary and backup)
[root@k8s-master1 cfg]# vim /etc/nginx/nginx.conf
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

# Layer-4 load balancing for the kube-apiserver on the two masters
stream {

    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';

    access_log  /var/log/nginx/k8s-access.log  main;

    upstream k8s-apiserver {
       server 192.168.1.20:6443;   # Master1 APISERVER IP:PORT
       server 192.168.1.23:6443;   # Master2 APISERVER IP:PORT
    }

    server {
       listen 16443; # nginx runs on the master nodes themselves, so it must not listen on 6443 or it would conflict with kube-apiserver
       proxy_pass k8s-apiserver;
    }
}

http {
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 2048;

    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;

    server {
        listen       80 default_server;
        server_name  _;

        location / {
        }
    }
}

Keepalived
On the master1 node


#Keepalived configuration file (Nginx primary)
[root@k8s-master1 cfg]# vim /etc/keepalived/keepalived.conf
global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id NGINX_MASTER
}

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33  # change to the actual NIC name
    virtual_router_id 51 # VRRP router ID; must be unique per instance
    priority 100    # priority; set to 90 on the backup server
    advert_int 1    # VRRP advertisement (heartbeat) interval, default 1 second
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    # Virtual IP
    virtual_ipaddress {
        10.8.165.250/24
    }
    track_script {
        check_nginx
    }
}

Notes:
• vrrp_script: specifies the script that checks nginx's state (keepalived uses its result to decide whether to fail over)
• virtual_ipaddress: the virtual IP (VIP)

#Create the nginx health-check script referenced in the configuration above
[root@k8s-master1 cfg]# vim /etc/keepalived/check_nginx.sh
#!/bin/bash
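# Count sockets owned by nginx; if none are found the script exits 1 below,
# keepalived marks the check as failed, and the VIP can move to the backup.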
count=$(ss -antp |grep nginx |egrep -cv "grep|$$")

if [ "$count" -eq 0 ];then
    exit 1
else
    exit 0
fi


#Make the script executable
[root@k8s-master1 cfg]# chmod +x /etc/keepalived/check_nginx.sh

On the master2 node


#Keepalived configuration file (Nginx backup)
[root@k8s-master2 ~]# vim /etc/keepalived/keepalived.conf
global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id NGINX_BACKUP
}

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51 # VRRP router ID; must be unique per instance
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.8.165.250/24
    }
    track_script {
        check_nginx
    }
}


#Create the nginx health-check script referenced in the configuration above
[root@k8s-master2 ~]# vim /etc/keepalived/check_nginx.sh
#!/bin/bash
count=$(ss -antp |grep nginx |egrep -cv "grep|$$")

if [ "$count" -eq 0 ];then
    exit 1
else
    exit 0
fi

#Make the script executable
[root@k8s-master2 ~]# chmod +x /etc/keepalived/check_nginx.sh

Note: Keepalived decides whether to fail over based on the script's exit code (0 means nginx is healthy, non-zero means it is not).
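
The script can be tested by hand before relying on it (a quick sketch):

#Exit code should be 0 while nginx is running, and 1 after nginx is stopped
[root@k8s-master1 cfg]# /etc/keepalived/check_nginx.sh; echo $?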

Start nginx and keepalived
On the master1 and master2 nodes


#Start the services and enable them at boot
systemctl daemon-reload
systemctl enable nginx keepalived
systemctl start nginx keepalived

Error on startup
[root@k8s-master1 cfg]# journalctl -xe -u nginx

-- Unit nginx.service has begun starting up.
Jun 09 21:47:41 k8s-master1 nginx[30694]: nginx: [emerg] unknown directive "stream" in /etc/nginx/nginx.conf:13

Fix
*The stream directive is unknown because the nginx stream module is missing; install the modules package
[root@k8s-master1 cfg]# yum -y install nginx-all-modules.noarch
[root@k8s-master1 cfg]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful

Finally, restart the nginx service (systemctl restart nginx).

#Check that keepalived is working (the VIP should be bound on the primary)
[root@k8s-master1 cfg]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:79:95:e6 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.20/24 brd 10.8.165.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet 10.8.165.250/24 scope global secondary ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::e187:8e2f:2977:6d12/64 scope link 
       valid_lft forever preferred_lft forever
    inet6 fe80::82cf:7f96:a8f:69e1/64 scope link tentative dadfailed 
       valid_lft forever preferred_lft forever


[root@k8s-master2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:ab:04:62 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.23/24 brd 10.8.165.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:feab:462/64 scope link 
       valid_lft forever preferred_lft forever

On the Nginx primary you can see that the virtual IP 10.8.165.250 is bound to the ens33 interface, so keepalived is working correctly.

Stop nginx on the primary node and test whether the VIP fails over to the backup server.

On the master1 node


#Stop nginx on the primary node and test whether the VIP floats to the backup server
[root@k8s-master1 cfg]# pkill nginx
[root@k8s-master1 cfg]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:79:95:e6 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.20/24 brd 10.8.165.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::e187:8e2f:2977:6d12/64 scope link 
       valid_lft forever preferred_lft forever
    inet6 fe80::82cf:7f96:a8f:69e1/64 scope link tentative dadfailed 
       valid_lft forever preferred_lft forever


[root@k8s-master2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:ab:04:62 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.23/24 brd 10.8.165.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet 10.8.165.250/24 scope global secondary ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:feab:462/64 scope link 
       valid_lft forever preferred_lft forever

On the Nginx backup you can see that the virtual IP 10.8.165.250 is now bound to ens33: the failover succeeded.

[root@k8s-master1 cfg]# systemctl start nginx
[root@k8s-master1 cfg]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:79:95:e6 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.20/24 brd 10.8.165.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet 10.8.165.250/24 scope global secondary ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::e187:8e2f:2977:6d12/64 scope link 
       valid_lft forever preferred_lft forever
    inet6 fe80::82cf:7f96:a8f:69e1/64 scope link tentative dadfailed 
       valid_lft forever preferred_lft forever

[root@k8s-master2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:ab:04:62 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.23/24 brd 10.8.165.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:feab:462/64 scope link 
       valid_lft forever preferred_lft forever


After the nginx service is started again on the Nginx primary, the VIP floats back and binds to its ens33 interface, while it is released from ens33 on the Nginx backup.

Test access through the load balancer

On any node in the cluster (here master1)


#From any node in the K8s cluster, curl the K8s version through the VIP to test the load balancer
[root@k8s-master1 cfg]# curl -k https://10.8.165.250:16443/version
curl: (35) TCP connection reset by peer


[root@k8s-master1 cfg]# tail /var/log/nginx/k8s-access.log -f
192.168.1.21 192.168.31.71:6443, 192.168.31.74:6443 - [09/Jun/2022:22:12:56 +0800] 502 0, 0
192.168.1.21 k8s-apiserver - [09/Jun/2022:22:15:02 +0800] 502 0
192.168.1.20 192.168.31.74:6443, 192.168.31.71:6443 - [09/Jun/2022:22:16:02 +0800] 502 0, 0
192.168.1.21 192.168.31.74:6443, k8s-apiserver - [09/Jun/2022:22:17:56 +0800] 502 0, 0
192.168.1.21 k8s-apiserver - [09/Jun/2022:22:18:00 +0800] 502 0
192.168.1.21 192.168.31.71:6443, 192.168.31.74:6443 - [09/Jun/2022:22:18:51 +0800] 502 0, 0
192.168.1.21 192.168.31.71:6443, 192.168.31.74:6443 - [09/Jun/2022:22:20:03 +0800] 502 0, 0
192.168.1.21 192.168.31.71:6443, 192.168.31.74:6443 - [09/Jun/2022:22:22:21 +0800] 502 0, 0
192.168.1.21 192.168.31.71:6443, 192.168.31.74:6443 - [09/Jun/2022:22:26:21 +0800] 502 0, 0
192.168.1.21 192.168.31.71:6443, 192.168.31.74:6443 - [09/Jun/2022:22:28:38 +0800] 502 0, 0
192.168.1.21 192.168.31.71:6443, 192.168.31.74:6443 - [09/Jun/2022:22:41:47 +0800] 502 0, 0
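
The 502 entries above show nginx trying upstreams 192.168.31.71:6443 and 192.168.31.74:6443, which do not match the masters configured earlier (192.168.1.20 and 192.168.1.23); this suggests the stream upstream block actually in use still pointed at different apiserver addresses, so the proxied requests never reached a working apiserver. If the VIP test fails like this, it helps to check each layer separately (a sketch run on the LB node):

#Is each apiserver reachable directly?
[root@k8s-master1 cfg]# curl -k https://192.168.1.20:6443/version
[root@k8s-master1 cfg]# curl -k https://192.168.1.23:6443/version
#Is nginx listening on the LB port, and which upstreams is it actually configured with?
[root@k8s-master1 cfg]# ss -lntp | grep 16443
[root@k8s-master1 cfg]# grep -A 3 'upstream k8s-apiserver' /etc/nginx/nginx.conf
#Reload nginx after fixing the upstream block
[root@k8s-master1 cfg]# nginx -s reload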

We are not done yet; the most critical step is still to come.

8.3 Point All Worker Nodes at the LB VIP

#Run on all Worker Nodes (in this setup master1 also runs kubelet and kube-proxy, so it is included)
On the master1, node1 and node2 nodes


[root@k8s-master1 cfg]# sed -i 's#192.168.1.20:6443#10.8.165.250:16443#' /opt/kubernetes/cfg/*

On the master2 node


[root@k8s-master2 ~]# sed -i 's#192.168.1.23:6443#10.8.165.250:16443#' /opt/kubernetes/cfg/*

On the master1, master2, node1 and node2 nodes


systemctl restart kubelet kube-proxy
[root@k8s-master1 cfg]# kubectl get node
NAME          STATUS     ROLES    AGE     VERSION
k8s-master1   NotReady   <none>   3h28m   v1.20.4
k8s-master2   Ready      <none>   95m     v1.20.4
k8s-node1     NotReady   <none>   168m    v1.20.4
k8s-node2     NotReady   <none>   160m    v1.20.4
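
The nodes whose kubelet and kube-proxy were just restarted may briefly report NotReady while they reconnect through the VIP; they should return to Ready shortly. A quick way to confirm that every node now talks to the apiserver via the load balancer (a sketch, run on each node):

#Every kubeconfig under cfg/ should now point at the VIP and LB port
grep -rn '10.8.165.250:16443' /opt/kubernetes/cfg/
#kubelet and kube-proxy should have established connections to port 16443
ss -tnp | grep 16443

Once all nodes report Ready again, the highly available multi-master cluster is complete.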