Deploying a Kubernetes v1.28.0 single-master cluster (shell script deployment)

依琳龙炫, published 2023/12/19 10:45:12
[Abstract] Deploying a Kubernetes v1.28.0 single-master cluster with a shell script.

Preparation
Operating system: Ubuntu 20.04 Server

Hostname         IP address      Version
k8s-master01     172.31.0.201    v1.28.0
k8s-node01       172.31.0.205    v1.28.0
k8s-node02       172.31.0.206    v1.28.0

Container runtime: containerd 1.7.2
Network plugin: Calico v3.23.5
The Calico manifest's default Pod CIDR is 192.168.0.0/16; it can be changed to match the cluster's Pod network, although that was not done for this deployment (see the sketch after the table below).

Network    CIDR
Pod        10.244.0.0/16
Service    10.96.0.0/12
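
If you want Calico's default IP pool to match the Pod CIDR passed to kubeadm (10.244.0.0/16 here), you can edit calico.yaml before applying it; recent manifests can also detect the cluster's Pod CIDR automatically, so this is optional. A minimal sketch, assuming the v3.23 manifest still contains the commented-out CALICO_IPV4POOL_CIDR variable:

# Optional: uncomment CALICO_IPV4POOL_CIDR and point it at the cluster Pod CIDR
sed -i 's|# - name: CALICO_IPV4POOL_CIDR|- name: CALICO_IPV4POOL_CIDR|' calico.yaml
sed -i 's|#   value: "192.168.0.0/16"|  value: "10.244.0.0/16"|' calico.yaml
grep -A1 CALICO_IPV4POOL_CIDR calico.yaml   # confirm the change before kubectl apply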

The shell script supports CentOS 7, Ubuntu 18.04, and Ubuntu 20.04 (you need to be able to read the script and adjust the Ubuntu version variables accordingly).

#!/bin/bash
#
#********************************************************************
#Author:            lvxuan
#QQ:                360956175
#Date:              2022-12-25
#FileName:          install_kubernetes_containerd.sh
#URL:               http://www.wangxiaochun.com
#Description:       The test script
#Copyright (C):     2022 All rights reserved
#********************************************************************

# Common script for Ubuntu 18.04/20.04 and CentOS 7

# Note: a Kubernetes node should have at least 2 GB of RAM

# For a highly available cluster, configure haproxy + keepalived on separate machines

#------------------------------------------------------------------------- Note: the only things that need to be changed are the version and the IP addresses -----------------------------------------<<<

#### Only the first master node of the Kubernetes cluster needs to run the init function below
#### The CentOS 7 kernel is old; upgrade it and then rerun this script (optional, depending on your needs)
#### Check the kernel version: uname -r
#### yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm -y ;
#### yum  --disablerepo="*"  --enablerepo="elrepo-kernel"  list  available -y ;
#### yum  --enablerepo=elrepo-kernel  install  kernel-ml -y ;
#### grubby --set-default $(ls /boot/vmlinuz-* | grep elrepo) ; grubby --default-kernel  ; reboot
####


############################################## Define variables ####################################
KUBE_VERSION="1.28.0"
#KUBE_VERSION="1.24.4"
#KUBE_VERSION="1.24.3"
#KUBE_VERSION="1.24.0"

KUBE_VERSION2=$(echo $KUBE_VERSION |awk -F. '{print $2}')

UBTU=ubuntu
UBVER=20.04
#UBVER=18.04
CENT=centos
CENTVER=7
releasever=7.9
basearch=x86_64

KUBEAPI_IP=172.31.0.201   # For a single-master setup this must be the master's IP
MASTER1_IP=172.31.0.201
MASTER2_IP=172.31.0.202
MASTER3_IP=172.31.0.203
NODE1_IP=172.31.0.205
NODE2_IP=172.31.0.206
NODE3_IP=172.31.0.207
HARBOR_IP=172.31.0.200

DOMAIN=longxuan.vip

MASTER1=k8s-master01.$DOMAIN
MASTER2=k8s-master02.$DOMAIN
MASTER3=k8s-master03.$DOMAIN
NODE1=k8s-node01.$DOMAIN
NODE2=k8s-node02.$DOMAIN
NODE3=k8s-node03.$DOMAIN
HARBOR=harbor.$DOMAIN

POD_NETWORK="10.244.0.0/16"
SERVICE_NETWORK="10.96.0.0/12"

IMAGES_URL="registry.aliyuncs.com/google_containers"


LOCAL_IP=`hostname -I|awk '{print $1}'`

source /etc/os-release

COLOR_SUCCESS="echo -e \033[1;32m"
COLOR_FAILURE="echo -e \033[1;31m"
END="\033[m"

color(){
    RES_COL=60
    MOVE_TO_COL="echo -en \033[${RES_COL}G"
    SETCOLOR_SUCCESS="echo -en \033[1;32m"
    SETCOLOR_FAILURE="echo -en \033[1;31m"
    SETCOLOR_WARNING="echo -en \033[1;33m"
    SETCOLOR_NORMAL="echo -en \E[0m"
    echo -n "$1" && $MOVE_TO_COL
    echo -n "["
    if [ $2 = "success" -o $2 = "0" ];then
        ${SETCOLOR_SUCCESS}
        echo -n $"  OK  "
    elif [ $2 = "failure" -o $2 = "1"  ];then
        ${SETCOLOR_FAILURE}
        echo -n $"FAILED"
    else
        ${SETCOLOR_WARNING}
        echo -n $"WARNING"
    fi
    ${SETCOLOR_NORMAL}
    echo -n "]"
    echo
}

check(){
    if [ $ID = ${UBTU} -a ${VERSION_ID} = ${UBVER}  ];then
        true
    elif [ $ID = ${CENT} -a ${VERSION_ID} = ${CENTVER} ];then
        true
    else
        color "不支持此操作系统,退出!" 1
        exit
    fi
    if [ $KUBE_VERSION2 -lt 24 ] ;then
        color "当前kubernetes版本过低,Containerd要求不能低于v1.24.0版,退出!" 1
        exit
    fi
}


install_prepare () {
    cat >> /etc/hosts <<EOF

$KUBEAPI_IP kubeapi.$DOMAIN
$MASTER1_IP $MASTER1
$MASTER2_IP $MASTER2
$MASTER3_IP $MASTER3
$NODE1_IP $NODE1
$NODE2_IP $NODE2
$NODE3_IP $NODE3
$HARBOR_IP $HARBOR
EOF
    hostnamectl set-hostname $(awk -v ip=$LOCAL_IP '{if($1==ip && $2 !~ "kubeapi")print $2}' /etc/hosts)
    swapoff -a
    sed -i '/swap/s/^/#/' /etc/fstab
    cat >> /etc/rc.local <<EOF
#!/bin/bash
sudo swapoff -a
EOF
    chmod a+x /etc/rc.local
    systemctl enable rc.local
    systemctl restart rc.local
    color "Pre-installation preparation complete!" 0
    sleep 1
}

config_kernel () {
    cat <<EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

    modprobe overlay
    modprobe br_netfilter

    cat <<EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF
    sysctl --system
}
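
# Optional check: after config_kernel runs, confirm the modules are loaded and the sysctl settings took effect:
#   lsmod | grep -E 'overlay|br_netfilter'
#   sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward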

install_containerd(){
    if [ $ID = "${UBTU}" -a ${VERSION_ID} = "${UBVER}"  ];then
       apt update
       apt -y install containerd || { color "Failed to install containerd!" 1; exit 1; }
       mkdir -p /etc/containerd/
       containerd config default > /etc/containerd/config.toml
       #sed -i "s#k8s.gcr.io#${IMAGES_URL}#g"  /etc/containerd/config.toml
       sed -i "s#registry.k8s.io#${IMAGES_URL}#g"  /etc/containerd/config.toml
       sed -i 's#SystemdCgroup = false#SystemdCgroup = true#g' /etc/containerd/config.toml
       systemctl restart containerd.service
       [ $? -eq 0 ] && { color "containerd installed successfully!" 0; sleep 1; } || { color "Failed to install containerd!" 1 ; exit 2; }

    # CentOS branch
    else
       if [ $ID = "${CENT}" -a ${VERSION_ID} = "${CENTVER}" ];then
       yum -y install ipset ipvsadm
       [ $? -eq 0 ] && { color "ipvsadm installed successfully!" 0; sleep 1; } || { color "Failed to install ipvsadm!" 1 ; exit 2; }
   cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
   chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules   # load the ipvs modules now as well as on boot

   cat <<EOF | sudo tee /etc/yum.repos.d/docker-ce.repo
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
EOF

   cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

    yum clean all
    yum makecache fast

    yum -y install containerd || { color "Failed to install containerd!" 1; exit 1; }
    mkdir -p /etc/containerd/
    containerd config default > /etc/containerd/config.toml
    sed -i "s#registry.k8s.io#${IMAGES_URL}#g"  /etc/containerd/config.toml
    sed -i 's#SystemdCgroup = false#SystemdCgroup = true#g' /etc/containerd/config.toml
    systemctl restart containerd.service
    [ $? -eq 0 ] && { color "containerd installed successfully!" 0; sleep 1; } || { color "Failed to install containerd!" 1 ; exit 2; }

   fi
   fi
}
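
# Optional check: after install_containerd runs, confirm the runtime is active and uses the systemd cgroup driver:
#   systemctl is-active containerd
#   grep SystemdCgroup /etc/containerd/config.toml   # should print: SystemdCgroup = true
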
install_kubeadm(){
    if [ $ID = "${UBTU}" -a ${VERSION_ID} = "${UBVER}"  ];then
       apt-get update && apt-get install -y apt-transport-https
       curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
       cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
       apt-get update
       apt-cache madison kubeadm |head
       ${COLOR_FAILURE}"5秒后即将安装: kubeadm-"${KUBE_VERSION}" 版本....."${END}
       ${COLOR_FAILURE}"如果想安装其它版本,请按ctrl+c键退出,修改版本再执行"${END}
       sleep 6

    #安装指定版本
       apt install -y  kubeadm=${KUBE_VERSION}-00 kubelet=${KUBE_VERSION}-00 kubectl=${KUBE_VERSION}-00
       [ $? -eq 0 ] && { color "安装kubeadm成功!" 0;sleep 1; } || { color "安装kubeadm失败!" 1 ; exit 2; }

    else
       if [ $ID = "${CENT}" -a ${VERSION_ID} = "${CENTVER}"  ] ;then
          # Install the specified version
          yum install -y  kubeadm-${KUBE_VERSION}-0 kubelet-${KUBE_VERSION}-0 kubectl-${KUBE_VERSION}-0
          [ $? -eq 0 ] && { color "kubeadm installed successfully!" 0; sleep 1; } || { color "Failed to install kubeadm!" 1 ; exit 2; }
       fi
      fi
          systemctl restart kubelet.service
          systemctl enable kubelet.service

    # Enable kubectl command completion
        if [ $ID = "${CENT}" -a ${VERSION_ID} = "${CENTVER}"  ] ;then
           yum install -y bash-completion
        else
           apt install -y bash-completion
        fi
        kubectl completion bash > /etc/profile.d/kubectl_completion.sh
        source /etc/profile.d/kubectl_completion.sh
        source /usr/share/bash-completion/bash_completion
}

# Only the first master node of the Kubernetes cluster needs to run the init function below
kubernetes_init () {
    mkdir -p /root/.kube    # create the directory first so tee can save the join command to creta.txt
    kubeadm init --control-plane-endpoint="kubeapi.$DOMAIN" \
                 --kubernetes-version=v${KUBE_VERSION}  \
                 --pod-network-cidr=${POD_NETWORK} \
                 --service-cidr=${SERVICE_NETWORK} \
                 --token-ttl=0  \
                 --upload-certs \
                 --image-repository=${IMAGES_URL} |tee /root/.kube/creta.txt
    [ ${PIPESTATUS[0]} -eq 0 ] && color "Kubernetes cluster initialized successfully!" 0 || { color "Kubernetes cluster initialization failed!" 1 ; exit 3; }
    mkdir -p $HOME/.kube
    cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    chown $(id -u):$(id -g) $HOME/.kube/config
}

reset_kubernetes() {
    kubeadm reset -f --cri-socket unix:///run/containerd/containerd.sock
    rm -rf  /etc/cni/net.d/  $HOME/.kube/config
}

config_crictl () {
    cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
EOF

# curl https://docs.projectcalico.org/manifests/calico.yaml -O
wget https://docs.projectcalico.org/v3.23/manifests/calico.yaml --no-check-certificate
}
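
# Optional check: after config_crictl runs, confirm crictl can reach containerd through the configured socket:
#   crictl info >/dev/null && echo "crictl is connected to containerd"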

check

PS3="Select an option (1-4): "
select action in "Initialize a new Kubernetes cluster" "Join an existing Kubernetes cluster" "Reset the Kubernetes cluster" "Quit";do
    case $REPLY in
    1)
        install_prepare
        config_kernel
        install_containerd
        install_kubeadm
        kubernetes_init
        config_crictl
                echo -e "----------------------------------------------------------------------------------------------------------------------------"
                $SETCOLOR_WARNING"如果需要实现kubectl补全,请执行:source /etc/profile.d/kubectl_completion.sh && source /usr/share/bash-completion/bash_completion\n"${END}
        echo -e "----------------------------------------------------------------------------------------------------------------------------"
        $SETCOLOR_WARNING"需要添加node节点,请执行命令查看文件:cat /root/.kube/creta.txt\n"${END}
        echo -e "----------------------------------------------------------------------------------------------------------------------------"
        #echo "如果需要实现kubectl补全,执行:source /etc/profile.d/kubectl_completion.sh && source /usr/share/bash-completion/bash_completion"
                $COLOR_SUCCESS"先把node节点加入已有的Kubernetes集群已准备完毕后!!再添加网络插件,在主master执行:kubectl apply -f calico.yaml\n"${END}
        #kubectl apply -f calico.yaml
        echo -e "----------------------------------------------------------------------------------------------------------------------------"
        #kubectl apply -f calico.yaml
        #[ $? -eq 0 ] && { color "安装网络插件成功!" 0; sleep 5; } || { color "安装网络插件失败!" 1 ; exit 2; }
        break
        ;;
    2)
        install_prepare
        config_kernel
        install_containerd
        install_kubeadm
        $COLOR_SUCCESS"加入已有的Kubernetes集群已准备完毕,还需要执行最后一步其他节点加入集群的命令 kubeadm join ... "${END}
        break
        ;;
    3)
        reset_kubernetes
        $COLOR_SUCCESS"如果重启初始化,记得要把port:6443的进程杀掉命令:kill -9 $(pgrep kube-apiserver) "${END}
        kill -9 $(pgrep kube-apiserver)
        [ $? -eq 0 ] && { color "已经把port:6443的进程杀掉 $(pgrep kube-apiserver)" 0; sleep 2; } || { color "进程获取失败,不需要杀掉 $(pgrep kube-apiserver)" 1 ; exit 2; }

        break
        ;;
    4)
        exit
        ;;
    esac
done
exec bash

Run the script

[root@k8s-master01 ~]# bash install_k8s.sh
1) Initialize a new Kubernetes cluster
2) Join an existing Kubernetes cluster
3) Reset the Kubernetes cluster
4) Quit
Select an option (1-4):

How to use the script: on the first master, choose option 1 to initialize the cluster; on the node machines, choose option 2 to join the cluster; if something goes wrong and the cluster needs to be reset, choose option 3; to quit without deploying anything, choose option 4. Once the script finishes successfully, the remaining node-join step is the same as the official kubeadm procedure. A sketch of running the script on the node machines follows below.
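
One way to get the script onto the node machines is to copy it over SSH and run it there interactively; a rough sketch, assuming password-less root SSH to the node IPs used above:

# Run from the master; choose option 2 in the menu on each node
for n in 172.31.0.205 172.31.0.206; do
    scp install_k8s.sh root@$n:/root/
    ssh -t root@$n bash /root/install_k8s.sh
done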

(Screenshot omitted: output from a successful Kubernetes deployment.)

Commands to run on the node machines

The join command (printed at the end of a successful master deployment):

root@k8s-node01:~# kubeadm join kubeapi.longxuan.vip:6443 --token g57ge2.z1salqt0h6dc17g0 \
>         --discovery-token-ca-cert-hash sha256:5c94af9886c0b2ef751d97bb2f4b8acaae749612c0e6a2d737c7b109eed7a976
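
If the join command has been lost or the token has expired, it can be regenerated on the master at any time:

[root@k8s-master01 ~]# kubeadm token create --print-join-command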

Checking the node status shows NotReady; this is because the network plugin has not been deployed yet.

[root@k8s-master01 ~]# kubectl get nodes
NAME                        STATUS     ROLES           AGE     VERSION
k8s-master01.longxuan.vip   NotReady   control-plane   7m50s   v1.28.0
k8s-node01.longxuan.vip     NotReady   <none>          14s     v1.28.0
k8s-node02.longxuan.vip     NotReady   <none>          8s      v1.28.0
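
The node's Ready condition reports the reason; without a CNI plugin it typically contains a message such as "container runtime network not ready ... cni plugin not initialized":

[root@k8s-master01 ~]# kubectl describe node k8s-node01.longxuan.vip | grep -i 'network not ready'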

Deploy the Calico network plugin (it is best to deploy the node machines and join them to the master first, and only then deploy the network plugin).

# Download (the script downloads this itself; download manually only if the script fails, but the apply step always has to be run manually)
wget https://docs.projectcalico.org/v3.23/manifests/calico.yaml --no-check-certificate

# Apply
[root@k8s-master01 ~]# kubectl apply -f calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
poddisruptionbudget.policy/calico-kube-controllers created
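
It can take a minute or two for the Calico pods to come up; you can block until they have rolled out before checking the nodes:

[root@k8s-master01 ~]# kubectl -n kube-system rollout status daemonset/calico-node
[root@k8s-master01 ~]# kubectl -n kube-system rollout status deployment/calico-kube-controllers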

1. Check the node status after deployment with kubectl

[root@k8s-master01 ~]# kubectl get nodes
NAME                        STATUS   ROLES           AGE     VERSION
k8s-master01.longxuan.vip   Ready    control-plane   14m     v1.28.0
k8s-node01.longxuan.vip     Ready    <none>          6m37s   v1.28.0
k8s-node02.longxuan.vip     Ready    <none>          6m31s   v1.28.0

2. Check the status of all pods

[root@k8s-master01 ~]# kubectl get pod -A
NAMESPACE     NAME                                                READY   STATUS    RESTARTS      AGE
kube-system   calico-kube-controllers-7cb4fd5784-kthr8            1/1     Running   0             6m26s
kube-system   calico-node-7pzl8                                   1/1     Running   0             6m26s
kube-system   calico-node-bzxsn                                   1/1     Running   0             6m26s
kube-system   calico-node-cmnjz                                   1/1     Running   0             6m26s
kube-system   coredns-66f779496c-2jjwd                            1/1     Running   0             14m
kube-system   coredns-66f779496c-sm5sx                            1/1     Running   0             14m
kube-system   etcd-k8s-master01.longxuan.vip                      1/1     Running   2 (17m ago)   14m
kube-system   kube-apiserver-k8s-master01.longxuan.vip            1/1     Running   2 (17m ago)   14m
kube-system   kube-controller-manager-k8s-master01.longxuan.vip   1/1     Running   2 (17m ago)   14m
kube-system   kube-proxy-ln7rc                                    1/1     Running   0             7m12s
kube-system   kube-proxy-w2wvl                                    1/1     Running   0             14m
kube-system   kube-proxy-z4qd5                                    1/1     Running   0             7m6s
kube-system   kube-scheduler-k8s-master01.longxuan.vip            1/1     Running   2 (17m ago)   14m

Deploy an Nginx Deployment from the command line (ClusterIP)

[root@k8s-master01 ~]# kubectl create deployment nginx --image=nginx:alpine --replicas=2
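
You can optionally block until the Deployment reports available (in this walkthrough it will not become available until the image-pull issue below is resolved):

[root@k8s-master01 ~]# kubectl wait --for=condition=available deployment/nginx --timeout=120s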

Check the pod status

[root@k8s-master01 ~]# kubectl get pod -A
NAMESPACE     NAME                                                READY   STATUS              RESTARTS      AGE
default       nginx-7854ff8877-qj2q5                              0/1     Terminating         0             34m
default       nginx-b4ccb96c6-jqlc2                               0/1     ContainerCreating   0             3m4s

kubectl describe shows the image pull is stuck, so the image has to be pulled manually

[root@k8s-master01 ~]# kubectl describe pod -n default  nginx-b4ccb96c6-jqlc2
Name:                      nginx-b4ccb96c6-jqlc2
Namespace:                 default
Priority:                  0
Service Account:           default
Node:                      k8s-node01.longxuan.vip/172.31.0.205
Start Time:                Sun, 17 Dec 2023 14:32:17 +0000
Labels:                    app=nginx
...
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  16m   default-scheduler  Successfully assigned default/nginx-b4ccb96c6-jqlc2 to k8s-node01.longxuan.
  Normal  Pulling    16m   kubelet            Pulling image "nginx:alpine"

Manual pull command

[root@k8s-master01 ~]# ctr images pull docker.io/library/nginx:alpine

Check again whether the image has been downloaded; if it has, the query looks like this

[root@k8s-master01 ~]# ctr images ls
REF                            TYPE                                                      DIGEST                                                                  SIZE     PLATFORMS                                                                                LABELS
docker.io/library/nginx:alpine application/vnd.docker.distribution.manifest.list.v2+json sha256:3923f8de8d2214b9490e68fd6ae63ea604deddd166df2755b788bef04848b9bc 17.1 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/ppc64le,linux/s390x -
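
Note that ctr pulls into containerd's "default" namespace unless told otherwise, while the kubelet looks in the "k8s.io" namespace, and the stuck pod in this example is scheduled on k8s-node01 rather than the master. A safer variant is to pull on the node where the pod runs, either with crictl (which uses the k8s.io namespace via the crictl.yaml configured by the script) or with ctr -n k8s.io:

root@k8s-node01:~# crictl pull nginx:alpine
# or, equivalently:
root@k8s-node01:~# ctr -n k8s.io images pull docker.io/library/nginx:alpine
root@k8s-node01:~# crictl images | grep nginx   # confirm the kubelet can now see the image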

Check the Pod IPs after deployment

[root@k8s-master01 ~]# kubectl get pod -owide
NAME                    READY   STATUS    RESTARTS   AGE   IP               NODE                      NOMINATED NODE   READINESS GATES
nginx-b4ccb96c6-6xt4r   1/1     Running   0          80m   10.244.154.196   k8s-node01.longxuan.vip   <none>           <none>
nginx-b4ccb96c6-v6g4r   1/1     Running   0          80m   10.244.2.199     k8s-node02.longxuan.vip   <none>           <none>

Verify from inside the cluster; a response like the following means everything is working

[root@k8s-master01 ~]# curl 10.244.2.199
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

Expose Nginx with a NodePort Service from the command line so it can be reached from outside the cluster (use only one of the two commands below)

[root@k8s-master01 ~]# kubectl expose deployment nginx --port=80 --target-port=80 --type=NodePort
or
[root@k8s-master01 ~]# kubectl create service nodeport nginx --tcp=80:80

Check

[root@k8s-master01 ~]# kubectl get service
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        36h
nginx        NodePort    10.102.11.114   <none>        80:30401/TCP   63m
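
The NodePort can also be verified from the command line of any machine that can reach the node IPs, before trying a browser:

curl http://172.31.0.205:30401/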

(Screenshot omitted: all pod information.)

Verify from an external browser

http://172.31.0.205:30401/

(Screenshot omitted: the Nginx welcome page served via the NodePort from outside the cluster.)
