Summary of Common Kubernetes Cluster Operations

zuozewei, published 2021/09/30 17:45:14
[Abstract] A summary of common Kubernetes cluster operations.

Uninstall steps

Uninstall:
kubeadm reset

Clean up:
kubeadm reset -f                  # force the reset, no confirmation prompt
modprobe -r ipip                  # unload the ipip tunnel module (used by Calico)
lsmod                             # confirm the module is gone
rm -rf ~/.kube/                   # kubectl configuration
rm -rf /etc/kubernetes/           # cluster config, manifests, and certificates
rm -rf /etc/systemd/system/kubelet.service.d
rm -rf /etc/systemd/system/kubelet.service
rm -rf /usr/bin/kube*             # kubelet/kubeadm/kubectl binaries
rm -rf /etc/cni                   # CNI configuration
rm -rf /opt/cni                   # CNI plugin binaries
rm -rf /var/lib/etcd              # etcd data
rm -rf /var/etcd

Process list

[root@7dgroup3 ~]# ps -ef|grep kube
root      8395 26979  0 18:03 pts/1    00:00:00 grep --color=auto kube
root     20501     1  2 13:42 ?        00:06:50 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --authorization-mode=Webhook --client-ca-file=/etc/kubernetes/pki/ca.crt --cadvisor-port=0 --cgroup-driver=systemd --rotate-certificates=true --cert-dir=/var/lib/kubelet/pki
root     20744 20728  0 13:42 ?        00:02:26 etcd --advertise-client-urls=https://127.0.0.1:2379 --cert-file=/etc/kubernetes/pki/etcd/server.crt --client-cert-auth=true --data-dir=/var/lib/etcd --initial-advertise-peer-urls=https://127.0.0.1:2380 --initial-cluster=7dgroup3=https://127.0.0.1:2380 --key-file=/etc/kubernetes/pki/etcd/server.key --listen-client-urls=https://127.0.0.1:2379 --listen-peer-urls=https://127.0.0.1:2380 --name=7dgroup3 --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt --peer-client-cert-auth=true --peer-key-file=/etc/kubernetes/pki/etcd/peer.key --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt --snapshot-count=10000 --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
root     20793 20745  1 13:42 ?        00:03:56 kube-controller-manager --address=127.0.0.1 --allocate-node-cidrs=true --cluster-cidr=192.168.0.0/16 --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt --cluster-signing-key-file=/etc/kubernetes/pki/ca.key --controllers=*,bootstrapsigner,tokencleaner --kubeconfig=/etc/kubernetes/controller-manager.conf --leader-elect=true --node-cidr-mask-size=24 --root-ca-file=/etc/kubernetes/pki/ca.crt --service-account-private-key-file=/etc/kubernetes/pki/sa.key --use-service-account-credentials=true
root     20806 20746  1 13:42 ?        00:04:47 kube-apiserver --authorization-mode=Node,RBAC --advertise-address=172.17.211.142 --allow-privileged=true --client-ca-file=/etc/kubernetes/pki/ca.crt --disable-admission-plugins=PersistentVolumeLabel --enable-admission-plugins=NodeRestriction --enable-bootstrap-token-auth=true --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key --requestheader-allowed-names=front-proxy-client --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-key-file=/etc/kubernetes/pki/sa.pub --service-cluster-ip-range=10.96.0.0/12 --tls-cert-file=/etc/kubernetes/pki/apiserver.crt --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
root     20814 20760  0 13:42 ?        00:01:18 kube-scheduler --address=127.0.0.1 --kubeconfig=/etc/kubernetes/scheduler.conf --leader-elect=true
root     21095 21071  0 13:43 ?        00:00:22 /usr/local/bin/kube-proxy --config=/var/lib/kube-proxy/config.conf
root     22065 22047  0 13:43 ?        00:00:03 /usr/bin/kube-controllers
65534    22166 22137  0 13:43 ?        00:00:12 /heapster --source=kubernetes:https://kubernetes.default --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086

How to restart

[root@7dgroup3 ~]# swapoff -a && systemctl stop kubelet
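To bring the node back up afterwards, a minimal sketch (assuming kubelet is managed by systemd and swap stays off):

swapoff -a                  # kubelet will not start while swap is enabled
systemctl daemon-reload     # pick up any changed unit files
systemctl restart kubelet   # restart the kubelet
systemctl status kubelet    # confirm it is active (running)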

Common commands

View cluster-info

[root@7dgroup3 /]# kubectl cluster-info
Kubernetes master is running at https://172.17.211.142:6443
Heapster is running at https://172.17.211.142:6443/api/v1/namespaces/kube-system/services/heapster/proxy
KubeDNS is running at https://172.17.211.142:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
monitoring-grafana is running at https://172.17.211.142:6443/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
monitoring-influxdb is running at https://172.17.211.142:6443/api/v1/namespaces/kube-system/services/monitoring-influxdb/proxy

[root@7dgroup3 /]#

View the cluster-info dump

[root@s5 ~]# kubectl cluster-info dump
{
    "kind": "NodeList",
    "apiVersion": "v1",
    "metadata": {
        "selfLink": "/api/v1/nodes",
        "resourceVersion": "35732"
    },
    ......
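The full dump is long; it can also be written to a directory for offline inspection (the path below is just an example):

kubectl cluster-info dump --all-namespaces --output-directory=/tmp/cluster-state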

View deployments

[root@7dgroup3 ~]# kubectl -n kube-system get deployments
NAME                          DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
calico-kube-controllers       1         1         1            1           98d
coredns                       2         2         2            2           98d
......

Delete a deployment

[root@7dgroup3 ~]# kubectl -n kube-system delete deployment heapster-7dgroup
deployment.extensions "heapster-7dgroup" deleted

View services

[root@7dgroup3 shell]# kubectl -n kube-system get svc -o wide
NAME                   TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                  AGE   SELECTOR
heapster               ClusterIP   10.106.70.78     <none>        80/TCP                   23h   k8s-app=heapster
......
[root@7dgroup3 shell]#

View nodes

[root@7dgroup3 ~]# kubectl get nodes
NAME       STATUS    ROLES     AGE       VERSION
7dgroup    Ready     <none>    90d       v1.11.0
7dgroup2   Ready     <none>    90d       v1.11.0
7dgroup3   Ready     master    98d       v1.11.0

View Service Accounts

[root@7dgroup3 ~]# kubectl get sa --all-namespaces
NAMESPACE     NAME                                 SECRETS   AGE
default       default                              1         98d
kube-public   default                              1         98d
kube-system   attachdetach-controller              1         98d
..........

[root@7dgroup3 ~]#

View cluster DNS Service information

[root@7dgroup3 /]# kubectl get service -l k8s-app=kube-dns --namespace=kube-system
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
kube-dns   ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP   12d
[root@7dgroup3 /]#

View cluster DNS pods

[root@7dgroup3 /]# kubectl get pod --selector k8s-app=kube-dns --namespace=kube-system
NAME                       READY     STATUS    RESTARTS   AGE
coredns-78fcdf6894-m7rgl   1/1       Running   0          3d
coredns-78fcdf6894-tpkql   1/1       Running   0          3d
[root@7dgroup3 /]#
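To check that cluster DNS actually resolves service names, a common test is a throwaway pod (assuming the busybox:1.28 image can be pulled):

kubectl run -it --rm --restart=Never dns-test --image=busybox:1.28 -- nslookup kubernetes.default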

View component status

[root@7dgroup3 /]# kubectl -s https://172.17.211.142:6443 get componentstatus
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health": "true"}

View endpoints

[root@7dgroup3 shell]# kubectl get endpoints
NAME         ENDPOINTS             AGE
kubernetes   172.17.211.142:6443   23h
[root@7dgroup3 shell]#

View the node list

[root@7dgroup3 /]# kubectl -s https://172.17.211.142:6443 get nodes
NAME       STATUS    ROLES     AGE       VERSION
7dgroup    Ready     <none>    3d        v1.11.0
7dgroup2   Ready     <none>    3d        v1.11.0
7dgroup3   Ready     master    12d       v1.11.0

View node details

[root@7dgroup3 shell]# kubectl get node
NAME       STATUS   ROLES    AGE   VERSION
7dgroup    Ready    <none>   17m   v1.14.4
7dgroup2   Ready    <none>   13h   v1.14.4
7dgroup3   Ready    master   23h   v1.14.4
[root@7dgroup3 shell]# kubectl describe node 7dgroup
Name:               7dgroup
Roles:              <none>
Labels:             beta.kubernetes.io/arch=amd64
......

[root@7dgroup3 shell]#

View the kubeconfig (kubectl client configuration)

[root@7dgroup3 shell]# kubectl config view
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://172.17.211.142:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: REDACTED
    client-key-data: REDACTED
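Related kubectl config subcommands for inspecting and switching contexts:

kubectl config current-context                          # context currently in use
kubectl config get-contexts                             # list all contexts
kubectl config use-context kubernetes-admin@kubernetes  # switch to another context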

View the Kubernetes version

[root@7dgroup3 kubernets]# kubelet --version
Kubernetes v1.14.4
[root@7dgroup3 kubernets]#
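The client/server versions can also be checked with kubectl and kubeadm:

kubectl version --short   # client and server versions
kubeadm version           # kubeadm's own version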

View the kubeadm config

[root@7dgroup3 ~]# kubeadm config view
apiServer:
  extraArgs:
    authorization-mode: Node,RBAC
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.14.4
networking:
  dnsDomain: cluster.local
  podSubnet: 100.64.0.0/10
  serviceSubnet: 10.96.0.0/12
scheduler: {}

List the required images

[root@7dgroup3 ~]# kubeadm config images list
W0728 10:09:45.567500   28248 version.go:98] could not fetch a Kubernetes version from the internet: unable to get URL "https://dl.k8s.io/release/stable-1.txt": Get https://dl.k8s.io/release/stable-1.txt: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
W0728 10:09:45.567584   28248 version.go:99] falling back to the local client version: v1.15.0
k8s.gcr.io/kube-apiserver:v1.15.0
k8s.gcr.io/kube-controller-manager:v1.15.0
k8s.gcr.io/kube-scheduler:v1.15.0
k8s.gcr.io/kube-proxy:v1.15.0
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.3.10
k8s.gcr.io/coredns:1.3.1
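On a node that can reach the registry, the same images can be pre-pulled before kubeadm init; a mirror repository can be substituted when k8s.gcr.io is unreachable (the mirror below is only an example):

kubeadm config images pull
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers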

View the default init configuration

[root@7dgroup3 ~]# kubeadm config print init-defaults
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
......
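These defaults can be dumped to a file, edited, and fed back to kubeadm init:

kubeadm config print init-defaults > kubeadm-init.yaml
# edit kubeadm-init.yaml as needed, then:
kubeadm init --config kubeadm-init.yaml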

View pod logs

1. View the logs of a specified pod

kubectl logs <pod_name>
kubectl logs -f <pod_name>   # stream the logs, similar to tail -f on a log file

2. View the logs of a specified container in a pod

kubectl logs <pod_name> -c <container_name>
PS: to view a Docker container's logs directly:
docker logs <container_id>
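A few other commonly useful kubectl logs flags:

kubectl logs --tail=100 <pod_name>   # only the last 100 lines
kubectl logs --since=1h <pod_name>   # only logs from the last hour
kubectl logs -p <pod_name>           # logs of the previous (crashed) container instance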

View a pod's YAML

kubectl get pod <pod-name> -n <ns-name> -o yaml

For example:

[root@7dgroup3 shell]# kubectl get pod -n kube-system kube-apiserver-7dgroup3 -o yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubernetes.io/config.hash: 4c09c523e34dd307dbfa1702d7e5f326
    kubernetes.io/config.mirror: 4c09c523e34dd307dbfa1702d7e5f326
    kubernetes.io/config.seen: "2019-07-27T11:32:59.183084282+08:00"
    kubernetes.io/config.source: file
  creationTimestamp: "2019-07-27T03:34:32Z"
......

[root@7dgroup3 shell]#
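The same output can be saved as a backup, or narrowed to a single field with jsonpath:

kubectl get pod <pod-name> -n <ns-name> -o yaml > pod-backup.yaml
kubectl get pod <pod-name> -n <ns-name> -o jsonpath='{.status.podIP}'   # pod IP only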

Log in to a container

When logging in to a container, note which shell the container actually supports.

kubectl exec -it <pod-name> -n <ns-name> bash
kubectl exec -it <pod-name> -n <ns-name> sh
[root@7dgroup3 shell]# kubectl exec -it monitoring-grafana-95cbdd789-fzl49 -n kube-system /bin/sh
/ # ls
bin         dashboards  dev         etc         home        proc        root        run.sh      sys         tmp         usr         var

If the login fails with an error like:

OCI runtime exec failed: exec failed: container_linux.go:345: starting container process ca

it means the requested shell does not exist in that container; try the other shell type.
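One quick way to see which shells a container actually ships (assuming the image has ls and a /bin directory):

kubectl exec <pod-name> -n <ns-name> -- ls /bin | grep sh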

Create resources from YAML

# Create resources from a YAML file; apply can be run repeatedly, create cannot
kubectl create -f pod.yaml
kubectl apply -f pod.yaml
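A minimal pod.yaml for trying these commands (names and image are only examples):

cat > pod.yaml <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: nginx-demo
spec:
  containers:
  - name: nginx
    image: nginx:1.17
    ports:
    - containerPort: 80
EOF
kubectl apply -f pod.yaml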

Delete a pod defined in YAML

# Delete the pod by the name defined in pod.yaml
kubectl delete -f pod.yaml

Delete pods and services by label

# Delete all pods and services that carry a given label
kubectl delete pod,svc -l name=<label-name>

Delete a pod

[root@7dgroup3 ~]# kubectl get pods
NAME                 READY   STATUS    RESTARTS   AGE
frontend-2szjk       1/1     Running   0          3d1h
frontend-cv5qw       1/1     Running   0          3d1h
frontend-lp4tc       0/1     Evicted   0          3d2h
frontend-sccqj       1/1     Running   0          2d7h
redis-master-6ssmn   1/1     Running   3          3d1h
redis-slave-6vtrs    1/1     Running   1          3d2h
[root@7dgroup3 ~]# kubectl delete pod frontend-lp4tc
pod "frontend-lp4tc" deleted
[root@7dgroup3 ~]# kubectl get pods
NAME                 READY   STATUS    RESTARTS   AGE
frontend-2szjk       1/1     Running   0          3d1h
frontend-cv5qw       1/1     Running   0          3d1h
frontend-sccqj       1/1     Running   0          2d7h
redis-master-6ssmn   1/1     Running   3          3d1h
redis-slave-6vtrs    1/1     Running   1          3d2h
[root@7dgroup3 ~]#
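If a pod gets stuck in Terminating, it can be force-deleted; use with care, since this skips the graceful shutdown:

kubectl delete pod <pod-name> --grace-period=0 --force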

View node or pod resource usage

[root@7dgroup3 ~]# kubectl top nodes
NAME       CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
7dgroup    129m         6%     1567Mi          42%
7dgroup2   233m         11%    1811Mi          49%
7dgroup3   510m         25%    2651Mi          71%
[root@7dgroup3 ~]# kubectl top pod
NAME                 CPU(cores)   MEMORY(bytes)
frontend-2szjk       0m           16Mi
frontend-cv5qw       0m           16Mi
frontend-sccqj       0m           21Mi
redis-master-6ssmn   0m           1Mi
redis-slave-6vtrs    1m           8Mi
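Note that kubectl top needs a metrics pipeline in the cluster (heapster here; metrics-server on newer clusters). To cover every namespace:

kubectl top pod --all-namespaces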

Edit a deployment's YAML

# Edit a deployment's YAML
kubectl get deployment -n <ns-name>
kubectl edit deployment <deployment-name> -n <ns-name> -o yaml

For example:

[root@7dgroup3 shell]# kubectl get deployment -n kube-system
NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
calico-kube-controllers   1/1     1            1           23h
coredns                   2/2     2            2           23h
heapster                  1/1     1            1           23h
kubernetes-dashboard      1/1     1            1           23h
monitoring-grafana        1/1     1            1           23h
monitoring-influxdb       1/1     1            1           23h
[root@7dgroup3 shell]# kubectl edit deployment monitoring-grafana -n kube-system -o yaml

Enter a pod

[root@7dgroup3 data]# kubectl exec -it monitoring-grafana-95cbdd789-fzl49 sh -n kube-system
/ # ls
bin         dashboards  dev         etc         home        proc        root        run.sh      sys         tmp         usr         var
/ #

Configuration file directory

[root@7dgroup3 kubernetes]# pwd
/etc/kubernetes
[root@7dgroup3 kubernetes]# tree -h
.
├── [5.3K]  admin.conf
├── [5.4K]  controller-manager.conf
├── [5.3K]  kubelet.conf
├── [4.0K]  manifests
│   ├── [1.9K]  etcd.yaml
│   ├── [2.5K]  kube-apiserver.yaml
│   ├── [2.2K]  kube-controller-manager.yaml
│   └── [ 990]  kube-scheduler.yaml
├── [4.0K]  pki
│   ├── [1.2K]  apiserver.crt
│   ├── [1.1K]  apiserver-etcd-client.crt
│   ├── [1.6K]  apiserver-etcd-client.key
│   ├── [1.6K]  apiserver.key
│   ├── [1.1K]  apiserver-kubelet-client.crt
│   ├── [1.6K]  apiserver-kubelet-client.key
│   ├── [1.0K]  ca.crt
│   ├── [1.6K]  ca.key
│   ├── [4.0K]  etcd
│   │   ├── [1021]  ca.crt
│   │   ├── [1.6K]  ca.key
│   │   ├── [1.1K]  healthcheck-client.crt
│   │   ├── [1.6K]  healthcheck-client.key
│   │   ├── [1.1K]  peer.crt
│   │   ├── [1.6K]  peer.key
│   │   ├── [1.1K]  server.crt
│   │   └── [1.6K]  server.key
│   ├── [1.0K]  front-proxy-ca.crt
│   ├── [1.6K]  front-proxy-ca.key
│   ├── [1.0K]  front-proxy-client.crt
│   ├── [1.6K]  front-proxy-client.key
│   ├── [1.6K]  sa.key
│   └── [ 451]  sa.pub
└── [5.3K]  scheduler.conf
3 directories, 30 files
[root@7dgroup3 kubernetes]#

Configure SSL

Generate SSL certificates

The CA certificate generation process is as follows:

Zees-Air-2:ssl Zee$ openssl genrsa -des3 -passout pass:x -out dashboard.pass.key 2048
Generating RSA private key, 2048 bit long modulus
.....+++
........................+++
e is 65537 (0x10001)
Zees-Air-2:ssl Zee$ ll
total 32
-rw-r--r--  1 Zee  staff  1751 Nov 22 09:23 dashboard.pass.key
Zees-Air-2:ssl Zee$ openssl rsa -passin pass:x -in dashboard.pass.key -out 
......

The server certificate is generated as follows:

Zees-Air-2:ssl Zee$ openssl genrsa -out server.key 2048
Generating RSA private key, 2048 bit long modulus
..................................................................................................................................................................+++
.............................................+++
e is 65537 (0x10001)
Zees-Air-2:ssl Zee$ ll
total 72
-rw-r--r--  1 Zee  staff  1751 Nov 22 09:23 dashboard.pass.key
-rw-r--r--  1 Zee  staff  1679 Nov 22 09:23 dashboard.key
-rw-r--r--  1 Zee  staff  1009 Nov 22 09:24 dashboard.csr
-rw-r--r--  1 Zee  staff  1212 Nov 22 09:25 dashboard.crt
-rw-r--r--  1 Zee  staff  1212 Nov 22 09:28 dashboard.pem
-rw-r--r--  1 Zee  staff  1679 Nov 22 09:54 server.key
Zees-Air-2:ssl Zee$ openssl req -new -key server.key -subj "/CN=7dgroup3" -out server.csr
Zees-Air-2:ssl Zee$ ll
total 80
-rw-r--r--  1 Zee  staff  1751 Nov 22 09:23 dashboard.pass.key
-rw-r--r--  1 Zee  staff  1679 Nov 22 09:23 dashboard.key
-rw-r--r--  1 Zee  staff  1009 Nov 22 09:24 dashboard.csr
-rw-r--r--  1 Zee  staff  1212 Nov 22 09:25 dashboard.crt
-rw-r--r--  1 Zee  staff  1212 Nov 22 09:28 dashboard.pem
-rw-r--r--  1 Zee  staff  1679 Nov 22 09:54 server.key
-rw-r--r--  1 Zee  staff   891 Nov 22 09:55 server.csr
Zees-Air-2:ssl Zee$ openssl x509 -req -in server.csr -CA dashboard.crt -CAkey dashboard.key -CAcreateserial -out server.crt -days 5000
Signature ok
subject=/CN=7dgroup3
Getting CA Private Key
Zees-Air-2:ssl Zee$ ll
total 96
-rw-r--r--  1 Zee  staff  1751 Nov 22 09:23 dashboard.pass.key
-rw-r--r--  1 Zee  staff  1679 Nov 22 09:23 dashboard.key
-rw-r--r--  1 Zee  staff  1009 Nov 22 09:24 dashboard.csr
-rw-r--r--  1 Zee  staff  1212 Nov 22 09:25 dashboard.crt
-rw-r--r--  1 Zee  staff  1212 Nov 22 09:28 dashboard.pem
-rw-r--r--  1 Zee  staff  1679 Nov 22 09:54 server.key
-rw-r--r--  1 Zee  staff   891 Nov 22 09:55 server.csr
-rw-r--r--  1 Zee  staff  1094 Nov 22 09:56 server.crt
-rw-r--r--  1 Zee  staff    17 Nov 22 09:56 dashboard.srl
Zees-Air-2:ssl Zee$
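The resulting certificate can be inspected and verified against the CA certificate:

openssl x509 -in server.crt -noout -subject -dates   # print subject and validity period
openssl verify -CAfile dashboard.crt server.crt      # check the chain against the CA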