CKA Practice

Posted by 小源博客 on 2023/04/11 13:20:35

1. RBAC access control

[root@k8s-master-node1 ~]# kubectl config use-context k8s   # Be sure to run this in the exam to switch to the right cluster; not needed in the practice environment.
​
[root@k8s-master-node1 ~]# kubectl create clusterrole deployment-clusterrole --verb=create --resource=deployments,statefulsets,daemonsets
clusterrole.rbac.authorization.k8s.io/deployment-clusterrole created
​
[root@k8s-master-node1 ~]# kubectl create ns app-team1
namespace/app-team1 created
​
[root@k8s-master-node1 ~]# kubectl -n app-team1 create serviceaccount cicd-token
serviceaccount/cicd-token created
​
[root@k8s-master-node1 ~]# kubectl -n app-team1 create rolebinding cicd-token-rolebinding --clusterrole=deployment-clusterrole --serviceaccount=app-team1:cicd-token
rolebinding.rbac.authorization.k8s.io/cicd-token-rolebinding created
​
​
[root@k8s-master-node1 ~]# kubectl -n app-team1 describe rolebinding cicd-token-rolebinding    # check
Name:         cicd-token-rolebinding
Labels:       <none>
Annotations:  <none>
Role:
  Kind:  ClusterRole
  Name:  deployment-clusterrole
Subjects:
  Kind            Name        Namespace
  ----            ----        ---------
  ServiceAccount  cicd-token  app-team1
  
  
  # Read the question carefully: it specifies whether the ServiceAccount's permissions are cluster-wide
  # or limited to a single namespace. If cluster-wide, use a ClusterRoleBinding instead:
[root@k8s-master-node1 ~]# kubectl create clusterrolebinding cicd-token-binding --clusterrole=deployment-clusterrole --serviceaccount=app-team1:cicd-token
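A quick way to verify the binding took effect, as a sketch using kubectl's built-in authorization check:

kubectl auth can-i create deployments --as=system:serviceaccount:app-team1:cicd-token -n app-team1   # expect "yes"
kubectl auth can-i delete deployments --as=system:serviceaccount:app-team1:cicd-token -n app-team1   # expect "no" (only create was granted)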
​


[root@k8s-master-node1 ~]# kubectl top pod -l name=cpu-loader --sort-by=cpu -A
​
[root@k8s-master-node1 ~]# mkdir /opt/KUTR000401/
[root@k8s-master-node1 ~]# echo "the Pod name found above" > /opt/KUTR000401/KUTR00401.txt
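The two steps can also be collapsed into one line; a sketch assuming the label and output path from this task (with -A the Pod name is the second column):

kubectl top pod -l name=cpu-loader -A --sort-by=cpu --no-headers | awk 'NR==1{print $2}' > /opt/KUTR000401/KUTR00401.txt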
​
​


[root@k8s-master-node1 ~]# kubectl create ns my-app
namespace/my-app created
​
​
​
[root@k8s-master-node1 ~]# cat networkpolicy.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-port-from-namespace
  namespace: my-app
spec:
  podSelector:
    matchLabels: {}
  policyTypes:
    - Ingress
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              project: echo
        - podSelector:
            matchLabels:
              role: frontend
      ports:
        - protocol: TCP
          port: 9000
​
​
[root@k8s-master-node1 ~]# kubectl apply -f networkpolicy.yaml 
networkpolicy.networking.k8s.io/allow-port-from-namespace created
​
Verify:
[root@k8s-master-node1 ~]# kubectl describe networkpolicy -n my-app 
Name:         allow-port-from-namespace
Namespace:    my-app
Created on:   2023-04-03 15:33:10 +0800 CST
Labels:       <none>
Annotations:  <none>
Spec:
  PodSelector:     <none> (Allowing the specific traffic to all pods in this namespace)
  Allowing ingress traffic:
    To Port: 9000/TCP
    From:
      NamespaceSelector: project=echo
    From:
      PodSelector: role=frontend
  Not affecting egress traffic
  Policy Types: Ingress
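Note that namespaceSelector and podSelector listed as two separate items under from are OR-ed, which is what the two "From:" blocks in the describe output above show. If a question instead asks to allow only Pods labelled role=frontend inside namespaces labelled project=echo, both selectors must sit in a single from element; a hypothetical variant for comparison:

  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              project: echo
          podSelector:
            matchLabels:
              role: frontend
      ports:
        - protocol: TCP
          port: 9000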
​


[root@k8s-master-node1 ~]# cat deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: front-end
  labels:
    app: front-end
spec:
  replicas: 3
  selector:
    matchLabels:
      app: front-end
  template:
    metadata:
      labels:
        app: front-end
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
​
​
[root@k8s-master-node1 ~]# kubectl edit deployment front-end
​
        ports:
        - name: http
          containerPort: 80
          protocol: TCP
​
[root@k8s-master-node1 ~]# kubectl expose deployment front-end --type=NodePort --port=80 --target-port=80 --name=front-end-svc
service/front-end-svc exposed
​
​
[root@k8s-master-node1 ~]# kubectl get svc front-end-svc -o wide
NAME            TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)       AGE   SELECTOR
front-end-svc   NodePort   10.96.183.111   <none>        80:2984/TCP   20s   app=front-end
[root@k8s-master-node1 ~]# kubectl get deployment front-end -o wide
NAME        READY   UP-TO-DATE   AVAILABLE   AGE    CONTAINERS   IMAGES         SELECTOR
front-end   3/3     2            3           4m8s   nginx        nginx:1.14.2   app=front-end
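To confirm the Service actually picks up the front-end Pods, checking its Endpoints is a quick sanity test (three Pod IPs should be listed):

kubectl get endpoints front-end-svc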


[root@k8s-master-node1 ~]# cat ingress.yaml              
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    app.kubernetes.io/component: controller
  name: nginx-example
  annotations:
    ingressclass.kubernetes.io/is-default-class: "true"
spec:
  controller: k8s.io/ingress-nginx
​
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ping
  namespace: ing-internal
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx-example
  rules:
  - http:
      paths:
      - path: /hello
        pathType: Prefix
        backend:
          service:
            name: hello
            port:
              number: 5678
​
[root@k8s-master-node1 ~]# kubectl apply -f ingress.yaml  
ingressclass.networking.k8s.io/nginx-example created
ingress.networking.k8s.io/ping created
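A sketch of how to verify by curling the /hello path on a node's internal IP (the IP placeholder is environment-specific):

kubectl -n ing-internal get ingress ping
curl -kL <node-internal-IP>/hello    # the exam's hello backend should answer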
​
​
​
​
​
​
​


[root@k8s-master-node1 ~]# kubectl scale deployment presentation --replicas=4
​
[root@localhost ~]# kubectl get deployments presentation -o wide
[root@localhost ~]# kubectl get pod -l app=presentation
​


# List nodes and their labels
[root@localhost ~]# kubectl get nodes --show-labels
​
# Label the node
[root@localhost ~]# kubectl label nodes k8s-master-node1 disktype=ssd
node/k8s-master-node1 labeled
​
[root@localhost ~]# cat nginx.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx-kusc00401
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: disktype
            operator: In
            values:
            - ssd            
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
​
[root@localhost ~]# kubectl get pod nginx-kusc00401 -o wide
NAME              READY   STATUS    RESTARTS   AGE     IP           NODE               NOMINATED NODE   READINESS GATES
nginx-kusc00401   1/1     Running   0          3m10s   10.244.0.7   k8s-master-node1   <none>           <none>
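For this requirement a plain nodeSelector is an equivalent, shorter spec; nodeAffinity is used above because some versions of the question name it explicitly. A minimal sketch:

apiVersion: v1
kind: Pod
metadata:
  name: nginx-kusc00401
spec:
  nodeSelector:
    disktype: ssd
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent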


[root@localhost ~]# kubectl describe nodes | grep -i Taints | grep -vc NoSchedule
2
​
[root@localhost ~]# mkdir /opt/KUSC00402
[root@localhost ~]# echo "2" > /opt/KUSC00402/kusc00402.txt
[root@localhost ~]# 
​
[root@localhost ~]# kubectl describe nodes | grep -i Taints
Taints:             <none>
Taints:             <none>
[root@localhost ~]# 
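If some nodes are NotReady, the count should only include Ready nodes; a common recipe for that (a sketch, using the same NoSchedule criterion):

kubectl describe nodes $(kubectl get nodes | grep -w Ready | awk '{print $1}') | grep -i Taints | grep -vc NoSchedule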
​
​
​


​
[root@localhost ~]# cat pods.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: kucc8
spec:
  containers:
  - name: nginx
    image: nginx
  - name: consul
    image: consul
​
​
[root@localhost ~]# kubectl apply -f pods.yaml 
pod/kucc8 created
​
[root@localhost ~]# kubectl get pod kucc8     
NAME    READY   STATUS    RESTARTS   AGE
kucc8   2/2     Running   0          2m5s
​


[root@localhost ~]# cat valume.yaml              
apiVersion: v1
kind: PersistentVolume
metadata:
  name: app-config
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/srv/app-config"
​
[root@localhost ~]# kubectl apply -f valume.yaml
persistentvolume/app-config created
[root@localhost ~]# kubectl get pv
NAME         CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
app-config   1Gi        RWO            Retain           Available           manual                  27s
[root@localhost ~]# 
​
​
​


​
[root@localhost ~]# kubectl apply -f pvc.yaml 
persistentvolumeclaim/pv-volume created
pod/web-server created
[root@localhost ~]# cat pvc.yaml              
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-volume
spec:
  storageClassName: csi-hostpath-sc
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Mi
​
---
apiVersion: v1
kind: Pod
metadata:
  name: web-server
spec:
  volumes:
    - name: task-pv-storage
      persistentVolumeClaim:
        claimName: pv-volume
  containers:
    - name: pv-volume
      image: nginx
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: task-pv-storage
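A quick check that the claim bound and the Pod mounted it (assuming the default namespace used here):

kubectl get pvc pv-volume     # may stay Pending if the csi-hostpath-sc StorageClass does not exist in this practice cluster
kubectl get pod web-server -o wide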
​
​
​


[root@localhost ~]# kubectl logs kucc8 -c nginx | grep "RLIMIT_NOFILE" > /opt/KUTR00101/kucc8
[root@localhost ~]# cat /opt/KUTR00101/kucc8
2023/04/03 13:39:24 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 655360:655360
[root@localhost ~]# 
​
​


[root@localhost ~]# cat count.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: counter
spec:
  containers:
  - name: count
    image: busybox:1.28
    args:
    - /bin/sh
    - -c
    - >
      i=0;
      while true;
      do
        echo "$i: $(date)" >> /var/log/1.log;
        echo "$(date) INFO $i" >> /var/log/2.log;
        i=$((i+1));
        sleep 1;
      done      
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  - name: count-log-1
    image: busybox:1.28
    args: [/bin/sh, -c, 'tail -n+1 -F /var/log/1.log']
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  volumes:
  - name: varlog
    emptyDir: {}
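Applying the manifest and tailing the sidecar confirms the shared emptyDir works:

kubectl apply -f count.yaml
kubectl logs counter -c count-log-1    # should stream the lines written to /var/log/1.log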
​
​
​
​
​
​
​
​
​
​


[root@localhost ~]# kubectl get nodes
NAME               STATUS   ROLES                         AGE   VERSION
k8s-master-node1   Ready    control-plane,master,worker   92m   v1.22.1
k8s-worker-node1   Ready    worker                        92m   v1.22.1
[root@localhost ~]# kubectl cordon k8s-master-node1   # cordon: stop scheduling new Pods onto the node
node/k8s-master-node1 cordoned
​
[root@localhost ~]# kubectl drain k8s-master-node1 --ignore-daemonsets   # drain the node
node/k8s-master-node1 already cordoned
DEPRECATED WARNING: Aborting the drain command in a list of nodes will be deprecated in v1.23.
The new behavior will make the drain command go through all nodes even if one or more nodes failed during the drain.
For now, users can try such experience via: --ignore-errors
error: unable to drain node "k8s-master-node1", aborting command...
​
There are pending nodes to be drained:
 k8s-master-node1
cannot delete Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet (use --force to override): default/nginx-kusc00401
cannot delete Pods with local storage (use --delete-emptydir-data to override): kube-system/metrics-server-77564bc84d-dpgw5
​
​
[root@localhost ~]# yum list --showduplicates kubeadm --disableexcludes=kubernetes
​
[root@localhost ~]# yum install -y kubeadm-1.26.x-0 --disableexcludes=kubernetes
[root@localhost ~]# kubeadm version
[root@localhost ~]# kubeadm upgrade plan
​
Reference: https://kubernetes.io/zh-cn/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/
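The transcript stops at the plan step; a sketch of the remaining steps from the referenced guide, keeping the version placeholder as written (the CKA version of this task typically asks not to upgrade etcd, hence --etcd-upgrade=false):

kubeadm upgrade apply v1.26.x --etcd-upgrade=false
yum install -y kubelet-1.26.x-0 kubectl-1.26.x-0 --disableexcludes=kubernetes
systemctl daemon-reload && systemctl restart kubelet
kubectl uncordon k8s-master-node1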
​
​


​
[root@localhost ~]# export ETCDCTL_API=3
​
[root@localhost ~]# etcdctl --endpoints=https://192.168.73.10:2379 --cacert="/etc/kubernetes/pki/etcd/ca.crt" --cert="/etc/kubernetes/pki/etcd/server.crt" --key="/etc/kubernetes/pki/etcd/server.key" snapshot save etcd-snapshot.db
​
[root@localhost ~]# etcdctl snapshot status etcd-snapshot.db -wtable                
Deprecated: Use `etcdutl snapshot status` instead.
​
+----------+----------+------------+------------+
|   HASH   | REVISION | TOTAL KEYS | TOTAL SIZE |
+----------+----------+------------+------------+
| f7b412a9 |    10958 |       1034 |     3.0 MB |
+----------+----------+------------+------------+
​
​
​
# Restore
[root@localhost ~]# etcdctl --endpoints=https://192.168.73.10:2379 --cacert="/etc/kubernetes/pki/etcd/ca.crt" --cert="/etc/kubernetes/pki/etcd/server.crt" --key="/etc/kubernetes/pki/etcd/server.key" snapshot restore etcd-snapshot.db 
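When restoring for real, the snapshot is usually unpacked into a fresh data directory and the etcd static Pod is then pointed at it; a sketch (the paths below are assumptions):

etcdctl snapshot restore etcd-snapshot.db --data-dir=/var/lib/etcd-restore
# then edit /etc/kubernetes/manifests/etcd.yaml so the etcd data hostPath points to /var/lib/etcd-restore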
​


The kubelet service is down; restarting the service fixes it.
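On the broken node (reached with ssh in the exam), a sketch of the usual fix:

systemctl status kubelet     # confirm it is inactive/failed
systemctl restart kubelet
systemctl enable kubelet     # make sure it survives a reboot
kubectl get nodes            # back on the control plane, the node should report Ready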


[root@localhost ~]# kubectl drain k8s-worker-node1 --ignore-daemonsets
​
[root@localhost ~]# kubectl drain k8s-worker-node1 --ignore-daemonsets --delete-emptydir-data --force 
​
[root@localhost ~]# kubectl get node
[root@localhost ~]# kubectl get pod -A -o wide | grep k8s-worker-node1



[root@k8s-master-node1 ~]# kubectl create clusterrole deployment-clusterrole --verb=create --resource=deployments,daemonsets,statefulsets
clusterrole.rbac.authorization.k8s.io/deployment-clusterrole created
[root@k8s-master-node1 ~]# kubectl -n app-team1 create serviceaccount cicd-token
serviceaccount/cicd-token created
​
[root@k8s-master-node1 ~]# kubectl -n app-team1 create rolebinding cicd-token-binding --clusterrole=deployment-clusterrole --serviceaccount=app-team1:cicd-token
rolebinding.rbac.authorization.k8s.io/cicd-token-binding created
# If the question requires cluster-wide permissions, create a ClusterRoleBinding instead:
[root@k8s-master-node1 ~]# kubectl create clusterrolebinding cicd-token-binding --clusterrole=deployment-clusterrole --serviceaccount=app-team1:cicd-token
clusterrolebinding.rbac.authorization.k8s.io/cicd-token-binding created




[root@k8s-master-node1 ~]# kubectl cordon k8s-master-node1
node/k8s-master-node1 cordoned
[root@k8s-master-node1 ~]# kubectl drain k8s-master-node1 --ignore-daemonsets
node/k8s-master-node1 already cordoned

Reference: https://kubernetes.io/zh-cn/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/

[root@k8s-master-node1 ~]# docker ps -a |grep etcd 
[root@k8s-master-node1 ~]# docker cp 6e4785b9d160:/usr/local/bin/etcdctl /usr/bin/
[root@k8s-master-node1 ~]# etcdctl version 
etcdctl version: 3.5.0
API version: 3.5
​
Backup:
[root@k8s-master-node1 ~]# etcdctl snapshot save --endpoints=127.0.0.1:2379 --cacert="/etc/kubernetes/pki/etcd/ca.crt" --cert="/etc/kubernetes/pki/etcd/server.crt" --key="/etc/kubernetes/pki/etcd/server.key" /data/backup/etcd-snapshot.db
​
Restore:
[root@k8s-master-node1 ~]# etcdctl snapshot restore --endpoints=127.0.0.1:2379 --cacert="/etc/kubernetes/pki/etcd/ca.crt" --cert="/etc/kubernetes/pki/etcd/server.crt" --key="/etc/kubernetes/pki/etcd/server.key" /data/backup/etcd-snapshot.db     
​
​
​
​
​

​
[root@k8s-master-node1 ~]# cat networkpolicy.yaml 
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-port-from-namespace
  namespace: internal
spec:
  podSelector: {}
  policyTypes:
    - Ingress
  ingress:
    - from:
        - podSelector: {}
      ports:
        - protocol: TCP
          port: 8080
​
[root@k8s-master-node1 ~]# kubectl apply -f networkpolicy.yaml 
networkpolicy.networking.k8s.io/allow-port-from-namespace created
​
​


​
[root@k8s-master-node1 ~]# cat deployment.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: front-end-svc
  labels:
    app: front-end-svc
spec:
  replicas: 3
  selector:
    matchLabels:
      app: front-end-svc
  template:
    metadata:
      labels:
        app: front-end-svc
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
​
​
[root@k8s-master-node1 ~]# cat nodeport.yaml
apiVersion: v1
kind: Service
metadata:
  name: front-end-svc
  creationTimestamp: null
  labels:
    app: front-end-svc
spec:
  ports:
  - name: 80-80
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: front-end-svc
  type: NodePort
status:
  loadBalancer: {}
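Applying the Service and checking the NodePort that gets allocated (a sketch; the manifest above could equally be generated with kubectl expose as in the first walkthrough):

kubectl apply -f nodeport.yaml
kubectl get svc front-end-svc -o wide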
​

[root@k8s-master-node1 ~]# cat ingress.yaml 
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ping
  namespace: ing-internal
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx-example
  rules:
  - http:
      paths:
      - path: /hi
        pathType: Prefix
        backend:
          service:
            name: hi
            port:
              number: 5678
​
​
​


[root@k8s-master-node1 ~]# kubectl scale deployment.apps/front-end-svc --replicas=4
deployment.apps/front-end-svc scaled
[root@k8s-master-node1 ~]# 


[root@k8s-master-node1 ~]# kubectl label nodes k8s-master-node1 disktype=spinning
node/k8s-master-node1 labeled
[root@k8s-master-node1 ~]# 
​
[root@k8s-master-node1 ~]# cat disktype.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx-kusc00401
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: disktype
            operator: In
            values:
            - spinning         
  containers:
  - name: nginx-kusc00401
    image: nginx
    imagePullPolicy: IfNotPresent
​
​
[root@k8s-master-node1 ~]# kubectl get pod nginx-kusc00401 -o wide               
NAME              READY   STATUS              RESTARTS   AGE    IP       NODE               NOMINATED NODE   READINESS GATES
nginx-kusc00401   0/1     ContainerCreating   0          117s   <none>   k8s-worker-node1   <none>           <none>


[root@k8s-master-node1 ~]# kubectl describe nodes |grep -i taint
Taints:             node.kubernetes.io/unschedulable:NoSchedule
Taints:             <none>
[root@k8s-master-node1 ~]# echo 2 >/opt/test.txt
​

[root@k8s-master-node1 ~]# kubectl apply -f pods11.yaml 
pod/nginx created
[root@k8s-master-node1 ~]# cat pods11.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx
  - name: redis
    image: redis
  - name: memcached
    image: memcached
  - name: consul
    image: consul

[root@k8s-master-node1 ~]# kubectl apply -f pv.yaml 
persistentvolume/app-config created
[root@k8s-master-node1 ~]# kubectl get pv
NAME         CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
app-config   1Gi        RWO            Retain           Available           manual                  5s
[root@k8s-master-node1 ~]# cat pv.yaml              
apiVersion: v1
kind: PersistentVolume
metadata:
  name: app-config
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/srv/app-config"
​
​
​
​

[root@k8s-master-node1 ~]# cat pod-pvc.yaml
apiVersion: v1
kind: Pod
metadata:
  name: task-pv-pod
spec:
  volumes:
    - name: pv-volume
      persistentVolumeClaim:
        claimName: pv-volume
  containers:
    - name: nginx
      image: nginx
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: pv-volume
​
[root@k8s-master-node1 ~]# cat pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-volume
spec:
  storageClassName: csi-hostpath-sc
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Mi
​
​
​
​


[root@k8s-master-node1 ~]# kubectl logs nginx-kusc00401 >/opt/KUTR00101/nginx
[root@k8s-master-node1 ~]# cat /opt/KUTR00101/nginx 

Official docs: https://kubernetes.io/zh-cn/docs/concepts/cluster-administration/logging/
[root@k8s-master-node1 ~]# cat test.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: counter
spec:
  containers:
  - name: count
    image: busybox
    args:
    - /bin/sh
    - -c
    - >
      i=0;
      while true;
      do
        echo "$i: $(date)" >> /var/log/1.log;
        echo "$(date) INFO $i" >> /var/log/2.log;
        i=$((i+1));
        sleep 1;
      done
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  - name: count-log-1
    image: busybox:1.28
    args: [/bin/sh, -c, 'tail -n+1 -F /var/log/1.log']
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  - name: count-log-2
    image: busybox:1.28
    args: [/bin/sh, -c, 'tail -n+1 -F /var/log/2.log']
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  volumes:
  - name: varlog
    emptyDir: {}
​
​
​
​
kubectl apply -f test.yaml 
kubectl get pods
kubectl get po counter -oyaml
kubectl get po counter -oyaml >15.pod.yaml
​
[root@k8s-master-node1 ~]# cat 15.pod.yaml 
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","kind":"Pod","metadata":{"annotations":{},"name":"counter","namespace":"default"},"spec":{"containers":[{"args":["/bin/sh","-c","i=0; while true; do\n  echo \"$i: $(date)\" \u003e\u003e /var/log/1.log;\n  echo \"$(date) INFO $i\" \u003e\u003e /var/log/2.log;\n  i=$((i+1));\n  sleep 1;\ndone\n"],"image":"busybox","name":"count","volumeMounts":[{"mountPath":"/var/log","name":"varlog"}]},{"args":["/bin/sh","-c","tail -n+1 -F /var/log/1.log"],"image":"busybox:1.28","name":"count-log-1","volumeMounts":[{"mountPath":"/var/log","name":"varlog"}]},{"args":["/bin/sh","-c","tail -n+1 -F /var/log/2.log"],"image":"busybox:1.28","name":"count-log-2","volumeMounts":[{"mountPath":"/var/log","name":"varlog"}]}],"volumes":[{"emptyDir":{},"name":"varlog"}]}}
  creationTimestamp: "2023-04-04T02:48:58Z"
  name: big-corp-app
  namespace: default
  resourceVersion: "50253"
  uid: cca038b9-d4b1-46c9-a0e6-338d6df78e31
spec:
  containers:
  - args:
    - /bin/sh
    - -c
    - |
      i=0; while true; do
        echo "$i: $(date)" >> /var/log/big-corp-app.log;    #
        echo "$(date) INFO $i" >> /var/log/big-corp-app.log;   #
        i=$((i+1));
        sleep 1;
      done
    image: busybox
    imagePullPolicy: Always
    name: count
    resources: {}
    terminationMessagePath: /dev/termination-log   #
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/log
      name: varlog
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-pl5bv
      readOnly: true
  - args:
    - /bin/sh
    - -c
    - tail -n+1 -F /var/log/big-corp-app.log   #
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    name: count-log-1
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/log
      name: varlog
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-pl5bv
      readOnly: true
  - args:
    - /bin/sh
    - -c
    - tail -n+1 -F /var/log/big-corp-app.log    #
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    name: count-log-2
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/log
      name: varlog
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-pl5bv
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: k8s-worker-node1
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - emptyDir: {}
    name: varlog
  - name: kube-api-access-pl5bv
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2023-04-04T02:48:58Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2023-04-04T02:49:04Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2023-04-04T02:49:04Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2023-04-04T02:48:58Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://749c8d997c629b01f5229b6c49cd9eec743fc458e44657835caff64feab2065c
    image: busybox:latest
    imageID: docker-pullable://busybox@sha256:b5d6fe0712636ceb7430189de28819e195e8966372edfc2d9409d79402a0dc16
    lastState: {}
    name: count
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2023-04-04T02:49:03Z"
  - containerID: docker://e5100bccd9c44c487ab0764e7f3d6a5a7f490a88df799360f49462283fbd480e
    image: busybox:1.28
    imageID: docker-pullable://busybox@sha256:141c253bc4c3fd0a201d32dc1f493bcf3fff003b6df416dea4f41046e0f37d47
    lastState: {}
    name: count-log-1
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2023-04-04T02:49:04Z"
  - containerID: docker://139736e7b10a534d77081d3d09177aea06c144e2a65bc22679f49021ef60bf35
    image: busybox:1.28
    imageID: docker-pullable://busybox@sha256:141c253bc4c3fd0a201d32dc1f493bcf3fff003b6df416dea4f41046e0f37d47
    lastState: {}
    name: count-log-2
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2023-04-04T02:49:04Z"
  hostIP: 192.168.73.20
  phase: Running
  podIP: 10.244.1.14
  podIPs:
  - ip: 10.244.1.14
  qosClass: BestEffort
  startTime: "2023-04-04T02:48:58Z"
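With 15.pod.yaml edited as above (pod renamed to big-corp-app, both writers and the sidecars switched to /var/log/big-corp-app.log), the running pod is replaced; a sketch of the remaining commands assuming the names used in this walkthrough:

# strip status, uid, resourceVersion and creationTimestamp from the export before re-creating
kubectl delete pod counter
kubectl apply -f 15.pod.yaml
kubectl logs big-corp-app -c count-log-1    # the sidecar should stream big-corp-app.log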
​
​
[root@k8s-master-node1 ~]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
big-corp-app                    3/3     Running   0          2m32s
front-end-svc-98f8b8945-9q5d9   1/1     Running   0          4h9m
front-end-svc-98f8b8945-bnq8h   1/1     Running   0          4h29m
front-end-svc-98f8b8945-bp747   1/1     Running   0          4h29m
front-end-svc-98f8b8945-t5w68   1/1     Running   0          4h29m
nginx                           4/4     Running   0          3h54m
nginx-kusc00401                 1/1     Running   0          4h2m
task-pv-pod                     0/1     Pending   0          3h34m
​
​
​
​

[root@k8s-master-node1 ~]# kubectl top pods -l name=big-corp-app --sort-by='cpu' --no-headers | awk 'NR==1 {print $1}' > /opt/KUTR00101/KUTR00401.txt
​
​

[root@k8s-master-node1 ~]# systemctl restart kubelet
[root@k8s-master-node1 ~]# systemctl status kubelet 