
Moving RocketChat to the Cloud

1. Copy the installation package to the ECS

Use an SFTP tool or SCP to copy the package to the ECS.
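A minimal SCP invocation would look like this (a sketch; <ECS_public_IP> is a placeholder for the real address):

# run on the local machine that holds the package
scp rocketchat-cloud.tar.gz root@<ECS_public_IP>:/root/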

[root@chinaskill ~]# ls
rocketchat-cloud.tar.gz

2. Extract the installation package

[root@chinaskill ~]# tar -zxvf rocketchat-cloud.tar.gz
[root@chinaskill ~]# ls
rocketchat-cloud  rocketchat-cloud.tar.gz
[root@chinaskill ~]# cd rocketchat-cloud
[root@chinaskill rocketchat-cloud]# ll
total 287584
-rw-r--r-- 1 501 games 162088460 Jul 11  2022 rocket.chat-3.4.1.tgz
-rw-r--r-- 1 501 games 132375089 Jul 31  2022 rocketchat-repo.tar.gz
-rw-r--r-- 1 501 games     13130 Jul 11  2022 setup_12.x
[root@chinaskill rocketchat-cloud]# tar -zxvf rocket.chat-3.4.1.tgz
[root@chinaskill rocketchat-cloud]# tar -zxvf rocketchat-repo.tar.gz
[root@chinaskill rocketchat-cloud]# ls
bundle  rocket.chat-3.4.1.tgz  rocketchat-repo  rocketchat-repo.tar.gz  setup_12.x
[root@chinaskill rocketchat-cloud]# cd bundle
[root@chinaskill bundle]# ls
main.js  programs  README  server  star.json

1. Inspect the dependencies

[root@chinaskill bundle]# ls
main.js  programs  README  server  star.json
[root@chinaskill bundle]# cd programs
[root@chinaskill programs]# ls
server  web.browser  web.browser.legacy
[root@chinaskill programs]# cd server
[root@chinaskill server]# ls
app      boot.js.map        config.json   mini-files.js      npm-rebuild-args.js      npm-rebuild.js.map  npm-require.js.map   packages        program.json    server-json.js
assets   boot-utils.js      debug.js      mini-files.js.map  npm-rebuild-args.js.map  npm-rebuilds.json   npm-shrinkwrap.json  profile.js      runtime.js      server-json.js.map
boot.js  boot-utils.js.map  debug.js.map  npm                npm-rebuild.js           npm-require.js      package.json         profile.js.map  runtime.js.map
#Inspect the dependency manifest
#Node.js projects declare dependencies in package.json
#Python uses requirements.txt
#Java (Maven) uses pom.xml
#Go uses go.mod
[root@chinaskill server]# cat package.json
{
  "name": "meteor-dev-bundle",
  "private": true,
  "dependencies": {
    "fibers": "4.0.3",
    "meteor-promise": "0.8.7",
    "promise": "8.0.2",
    "reify": "0.20.12",
    "@babel/parser": "7.9.4",
    "@types/underscore": "1.9.2",
    "underscore": "1.9.1",
    "source-map-support": "https://github.com/meteor/node-source-map-support/tarball/1912478769d76e5df4c365e147f25896aee6375e",
    "@types/semver": "5.4.0",
    "semver": "5.4.1",
    "node-gyp": "6.0.1",
    "node-pre-gyp": "0.14.0"
  },
  "devDependencies": {
    "split2": "2.1.1",
    "multipipe": "1.0.2",
    "chalk": "0.5.1"
  },
  "scripts": {
    "install": "node npm-rebuild.js"
  }
}

2. Check which Node.js version this Rocket.Chat release needs

[root@chinaskill bundle]# cat star.json
{
  "format": "site-archive-pre1",
  "builtBy": "Meteor METEOR@1.10.2",
  "programs": [
    {
      "name": "web.browser",
      "arch": "web.browser",
      "path": "programs/web.browser/program.json"
    },
    {
      "name": "web.browser.legacy",
      "arch": "web.browser.legacy",
      "path": "programs/web.browser.legacy/program.json"
    },
    {
      "name": "server",
      "arch": "os",
      "path": "programs/server/boot.js"
    }
  ],
  "meteorRelease": "METEOR@1.10.2",
  "nodeVersion": "12.16.1", #node的版本为12.16.1 后面会用到
  "npmVersion": "6.14.0",
  "gitCommitHash": "21157c0c4fe555434bd6b08ee7d7a847a3f8fb15"

3. Check the current version

[root@chinaskill server]# cd ..
[root@chinaskill programs]# ls
server  web.browser  web.browser.legacy
[root@chinaskill programs]# cd ..
[root@chinaskill bundle]# ls
main.js  programs  README  server  star.json
[root@chinaskill bundle]# vim README
This is a Meteor application bundle. It has only one external dependency:
Node.js v12.16.1. To run the application: # the Node version number
​
  $ (cd programs/server && npm install)
  $ export MONGO_URL='mongodb://user:password@host:port/databasename'
  $ export ROOT_URL='http://example.com'
  $ export MAIL_URL='smtp://user:password@mailhost:port/'
  $ node main.js
​
Use the PORT environment variable to set the port where the
application will listen. The default is 80, but that will require
root on most systems.
​
Find out more about Meteor at meteor.com.

3. Install MongoDB

1. Configure the YUM repository

[root@chinaskill bundle]# cat > /etc/yum.repos.d/mongodb.repo << EOF
[mongodb-org-4.4]
name=MongoDB Repository
baseurl=https://repo.mongodb.org/yum/redhat/7/mongodb-org/4.4/x86_64/
gpgcheck=0
enabled=1
EOF

2. Install MongoDB

[root@chinaskill bundle]# yum install -y mongodb-org

3. Configure the replica set (primary/secondary)

[root@chinaskill bundle]# vim /etc/mongod.conf
replication:
  replSetName: rs01
# Initialize the replica set (on the primary node)
[root@chinaskill bundle]# systemctl enable --now mongod
[root@chinaskill bundle]# mongo
> rs.initiate()
{
        "info2" : "no configuration specified. Using a default configuration for the set",
        "me" : "127.0.0.1:27017",
        "ok" : 1
}
rs01:SECONDARY> rs.conf()
{
        "_id" : "rs01",
        "version" : 1,
        "term" : 1,
        "protocolVersion" : NumberLong(1),
        "writeConcernMajorityJournalDefault" : true,
        "members" : [
                {
                        "_id" : 0,
                        "host" : "127.0.0.1:27017",
                        "arbiterOnly" : false,
                        "buildIndexes" : true,
                        "hidden" : false,
                        "priority" : 1,
                        "tags" : {
​
                        },
                        "slaveDelay" : NumberLong(0),
                        "votes" : 1
                }
        ],
        "settings" : {
                "chainingAllowed" : true,
                "heartbeatIntervalMillis" : 2000,
                "heartbeatTimeoutSecs" : 10,
                "electionTimeoutMillis" : 10000,
                "catchUpTimeoutMillis" : -1,
                "catchUpTakeoverDelayMillis" : 30000,
                "getLastErrorModes" : {
​
                },
                "getLastErrorDefaults" : {
                        "w" : 1,
                        "wtimeout" : 0
                },
                "replicaSetId" : ObjectId("67e4e523d5bd0ad66e191f16")
        }
}
rs01:PRIMARY> rs.status()   # check the status
{
        "set" : "rs01",
        "date" : ISODate("2025-03-27T05:44:21.827Z"),
        "myState" : 1,
        "term" : NumberLong(1),
        "syncSourceHost" : "",
        "syncSourceId" : -1,
        "heartbeatIntervalMillis" : NumberLong(2000),
        "majorityVoteCount" : 1,
        "writeMajorityCount" : 1,
        "votingMembersCount" : 1,
        "writableVotingMembersCount" : 1,
        "optimes" : {
                "lastCommittedOpTime" : {
                        "ts" : Timestamp(1743054255, 1),
                        "t" : NumberLong(1)
                },
                "lastCommittedWallTime" : ISODate("2025-03-27T05:44:15.211Z"),
                "readConcernMajorityOpTime" : {
                        "ts" : Timestamp(1743054255, 1),
                        "t" : NumberLong(1)
                },
                "readConcernMajorityWallTime" : ISODate("2025-03-27T05:44:15.211Z"),
                "appliedOpTime" : {
                        "ts" : Timestamp(1743054255, 1),
                        "t" : NumberLong(1)
                },
                "durableOpTime" : {
                        "ts" : Timestamp(1743054255, 1),
                        "t" : NumberLong(1)
                },
                "lastAppliedWallTime" : ISODate("2025-03-27T05:44:15.211Z"),
                "lastDurableWallTime" : ISODate("2025-03-27T05:44:15.211Z")
        },
        "lastStableRecoveryTimestamp" : Timestamp(1743054235, 1),
        "electionCandidateMetrics" : {
                "lastElectionReason" : "electionTimeout",
                "lastElectionDate" : ISODate("2025-03-27T05:41:55.181Z"),
                "electionTerm" : NumberLong(1),
                "lastCommittedOpTimeAtElection" : {
                        "ts" : Timestamp(0, 0),
                        "t" : NumberLong(-1)
                },
                "lastSeenOpTimeAtElection" : {
                        "ts" : Timestamp(1743054115, 1),
                        "t" : NumberLong(-1)
                },
                "numVotesNeeded" : 1,
                "priorityAtElection" : 1,
                "electionTimeoutMillis" : NumberLong(10000),
                "newTermStartDate" : ISODate("2025-03-27T05:41:55.200Z"),
                "wMajorityWriteAvailabilityDate" : ISODate("2025-03-27T05:41:55.219Z")
        },
        "members" : [
                {
                        "_id" : 0,
                        "name" : "127.0.0.1:27017",
                        "health" : 1,
                        "state" : 1,
                        "stateStr" : "PRIMARY",
                        "uptime" : 223,
                        "optime" : {
                                "ts" : Timestamp(1743054255, 1),
                                "t" : NumberLong(1)
                        },
                        "optimeDate" : ISODate("2025-03-27T05:44:15Z"),
                        "lastAppliedWallTime" : ISODate("2025-03-27T05:44:15.211Z"),
                        "lastDurableWallTime" : ISODate("2025-03-27T05:44:15.211Z"),
                        "syncSourceHost" : "",
                        "syncSourceId" : -1,
                        "infoMessage" : "",
                        "electionTime" : Timestamp(1743054115, 2),
                        "electionDate" : ISODate("2025-03-27T05:41:55Z"),
                        "configVersion" : 1,
                        "configTerm" : 1,
                        "self" : true,
                        "lastHeartbeatMessage" : ""
                }
        ],
        "ok" : 1,
        "$clusterTime" : {
                "clusterTime" : Timestamp(1743054255, 1),
                "signature" : {
                        "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                        "keyId" : NumberLong(0)
                }
        },
        "operationTime" : Timestamp(1743054255, 1)
}
# Add the secondary to the replica set (run on the primary; ${IP} is the secondary's address)
rs.add("${IP}", false)

4. Install the Node.js environment

1. Install the packages this Node.js build depends on

[root@chinaskill bundle]# yum install -y npm nodejs gcc make epel-release && sudo yum install -y GraphicsMagick
[root@chinaskill server]# yum groupinstall -y 'Development Tools' && yum install -y gcc-c++

2. Check the current Node.js version

[root@chinaskill bundle]# node -v
v16.20.2

3. Install a command for switching the Node.js version

# Use npm, the Node.js package manager, to install the n version manager globally
[root@chinaskill bundle]# npm install -g n
# Use n to switch the Node.js version
[root@chinaskill bundle]# n v12.16.1
  installing : node-v12.16.1
       mkdir : /usr/local/n/versions/node/12.16.1
       fetch : https://nodejs.org/dist/v12.16.1/node-v12.16.1-linux-x64.tar.xz
     copying : node/12.16.1
   installed : v12.16.1 (with npm 6.13.4)
​
Note: the node command changed location and the old location may be remembered in your current shell.
         old : /usr/bin/node
         new : /usr/local/bin/node
If "node --version" shows the old version then start a new shell, or reset the location hash with:
hash -r  (for bash, zsh, ash, dash, and ksh)
rehash   (for csh and tcsh)

1. Log out and back in after the install

Reconnect to the ECS cloud server.

2. Verify the version

[root@chinaskill ~]# node -v
v12.16.1

4. Download all the Node.js dependencies

[root@chinaskill ~]# cd /root/rocketchat-cloud/bundle/programs/server
# Use npm, the Node.js package manager, to resolve dependencies automatically
# It installs the set of packages declared in package.json in the current directory
[root@chinaskill server]# npm i

5. Start the service

1. Check how to start it

[root@chinaskill bundle]# cat README
This is a Meteor application bundle. It has only one external dependency:
Node.js v12.16.1. To run the application:
​
  $ (cd programs/server && npm install)
  $ export MONGO_URL='mongodb://user:password@host:port/databasename'
  $ export ROOT_URL='http://example.com'
  $ export MAIL_URL='smtp://user:password@mailhost:port/'
  $ node main.js
​
Use the PORT environment variable to set the port where the
application will listen. The default is 80, but that will require
root on most systems.
​
Find out more about Meteor at meteor.com.

2. Set the startup parameters

export MONGO_URL='mongodb://127.0.0.1:27017/rocketchat'
export ROOT_URL='http://1.94.162.248:3000'
export PORT=3000
export MONGO_OPLOG_URL='mongodb://127.0.0.1:27017/local?replicaSet=rs01'

3. Start the service

[root@chinaskill bundle]# node main.js
LocalStore: store created at
LocalStore: store created at
LocalStore: store created at
Setting default file store to GridFS
{"line":"120","file":"migrations.js","message":"Migrations: Not migrating, already at version 197","time":{"$date":1743058808696},"level":"info"}
Loaded the Apps Framework and loaded a total of 0 Apps!
Using GridFS for custom sounds storage
Using GridFS for custom emoji storage
Browserslist: caniuse-lite is outdated. Please run next command `npm update`
Updating process.env.MAIL_URL
(node:30615) [DEP0005] DeprecationWarning: Buffer() is deprecated due to security and usability issues. Please use the Buffer.alloc(), Buffer.allocUnsafe(), or Buffer.from() methods instead.
➔ System ➔ startup
➔ +-------------------------------------------------+
➔ |                  SERVER RUNNING                 |
➔ +-------------------------------------------------+
➔ |                                                 |
➔ |  Rocket.Chat Version: 3.4.1                     |
➔ |       NodeJS Version: 12.16.1 - x64             |
➔ |      MongoDB Version: 4.4.29                    |
➔ |       MongoDB Engine: wiredTiger                |
➔ |             Platform: linux                     |
➔ |         Process Port: 3000                      |
➔ |             Site URL: http://1.94.162.248:3000  |
➔ |     ReplicaSet OpLog: Enabled                   |
➔ |          Commit Hash: 21157c0c4f                |
➔ |        Commit Branch: HEAD                      |
➔ |                                                 |
➔ +-------------------------------------------------+
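Note that node main.js runs in the foreground and exits with the SSH session. A minimal way to keep it alive after logout (a sketch; a proper systemd unit would be the more robust choice):

# export the four variables above in the same shell first
nohup node main.js > /var/log/rocketchat.log 2>&1 &
tail -f /var/log/rocketchat.log   # wait for the SERVER RUNNING banner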

6. If the service cannot be reached

  • Check that the service actually started

  • Check that the security group allows port 3000 (see the checks below)
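Two quick checks to run on the ECS itself (a sketch):

# is anything listening on 3000?
ss -ntlp | grep 3000
# does the service answer locally? if yes but remote access fails,
# the security group (or a local firewall) is the likely cause
curl -I http://127.0.0.1:3000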

Public Cloud CCE Cluster

1. Cloud Container Engine management

Create an x86 cluster with the following requirements:

(1) Cluster name: kcloud;

(2) Cluster version: v1.21;

(3) Region: Shanghai1;

(4) Cluster management scale: 50 nodes;

(5) Control plane nodes: 3;

(6) Node subnet: intnetX-server;

(7) Pod limit per node: 64;

(8) Container CIDR: 10.10.0.0/16.

Node configuration requirements:

(1) Node name: kcloud-server;

(2) Node flavor: c6s.xlarge.2;

(3) Node OS: EulerOS 2.9.

2. Cloud container management

Use add-on management to install the dashboard visual monitoring UI in the kcloud container cluster.

3. Use kubectl to operate the cluster

Install the kubectl command on the kcloud cluster and use kubectl to manage the kcloud cluster.

# By default kubectl reads ~/.kube/config as the cluster configuration file
# On a self-built k8s cluster the config file defaults to /etc/kubernetes/admin.conf
# Since we used a one-click installation command, the script copied the config file for us
# The config file specifies the api-server address to connect to, the CA certificate material, and the user credentials we use
# It plays the same role as admin-openrc.sh in OpenStack
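On a self-built cluster the copy would look like the sketch below; on CCE the kubeconfig is downloaded from the console instead, but it ends up in the same place:

# let kubectl find the cluster credentials
mkdir -p ~/.kube
cp /etc/kubernetes/admin.conf ~/.kube/config
kubectl cluster-info   # verify connectivity to the api-server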

4. EVS disk storage volume

Purchase an EVS disk storage volume with the following configuration (an equivalent PVC sketch follows the list):

(1) Name: kcloud-disk;

(2) Cluster: kcloud;

(3) Capacity: 30 GB;

(4) Type: High I/O.
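The same volume can also be declared in YAML; a sketch of the equivalent PVC, assuming CCE's everest CSI driver with its csi-disk StorageClass (SAS being the disk-volume-type that corresponds to high I/O):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: kcloud-disk
  annotations:
    everest.io/disk-volume-type: SAS   # high I/O EVS disk
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 30Gi
  storageClassName: csi-disk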

5. Workloads

Use the container image service (SWR) as the image registry and upload the provided httpd-2.4.33 and httpd-2.4.35 images to it. In the kcloud cluster, create a Deployment from the uploaded httpd-2.4.33 image and configure it so it can be accessed from the public internet. A CLI sketch follows.
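A sketch of the CLI side, assuming the images are provided as local tarballs (file names illustrative) and <swr-endpoint>/<org> stand in for the real SWR registry address and organization; the docker login command itself is generated in the SWR console:

# tag and push the provided image to SWR
docker load -i httpd-2.4.33.tar
docker tag httpd:2.4.33 <swr-endpoint>/<org>/httpd:2.4.33
docker push <swr-endpoint>/<org>/httpd:2.4.33
# create the Deployment from the pushed image and expose it to the public network
kubectl create deployment httpd --image=<swr-endpoint>/<org>/httpd:2.4.33
kubectl expose deployment httpd --port 80 --type NodePort   # or bind a LoadBalancer Service to an EIP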

6. Create liveness probes

In the kcloud cluster, create pods named liveness-http and liveness-tcp, and configure an HTTP probe and a TCP probe respectively to monitor pod status.

  • Liveness check (health check)

    For our application (httpd), whether it can still serve requests is the criterion for judging the service healthy

  • Readiness check

    A series of readiness checks determines whether the container, once started, can serve traffic

1. Probe parameters

  • Initial delay (initialDelaySeconds)

Give the application time to start; once the delay has passed, probing begins on the probe period

  • Probe period (periodSeconds)

One probe runs per fixed time period

  • Failure threshold (failureThreshold)

The maximum number of probe failures allowed

  • Timeout (timeoutSeconds)

The timeout for each probe request

  • Success threshold (successThreshold)

The number of successes after which the probe is judged passing; a fragment showing all five fields follows
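These parameters map one-to-one onto probe fields in the pod spec; a minimal fragment showing all five (values are illustrative):

livenessProbe:
  httpGet:
    port: 80
  initialDelaySeconds: 15   # initial delay
  periodSeconds: 10         # probe period
  failureThreshold: 3       # maximum failures
  timeoutSeconds: 2         # request timeout
  successThreshold: 1       # required successes (must be 1 for liveness probes)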

2. Create the HTTP liveness probe

In the kcloud cluster, create a pod named liveness-http and configure an HTTP probe to monitor pod status, with an initial probe delay of 15 seconds and a container restart after 1 probe failure.

1. Generate a template and modify it

[root@kcloud-server ~]# kubectl apply -f pod.yaml
[root@kcloud-server ~]# kubectl get pod -w
[root@kcloud-server ~]# kubectl describe pod liveness-http
[root@kcloud-server ~]# kubectl edit pod liveness-http
[root@kcloud-server ~]# kubectl get pod
[root@kcloud-server ~]# kubectl run --image=httpd --port 80 --dry-run -oyaml liveness-http > pod.yaml
[root@kcloud-server ~]# kubectl explain pod.spec
[root@kcloud-server ~]# kubectl explain pod.spec | grep live -i
[root@kcloud-server ~]# kubectl explain pod.spec.containers | grep live -i
[root@kcloud-server ~]# kubectl explain pod.spec.containers.livenessProbe
[root@kcloud-server ~]# kubectl explain pod.spec.containers.livenessProbe.httpGet.port
[root@kcloud-server ~]# cat pod.yaml
[root@kcloud-server ~]# vim pod.yaml
[root@kcloud-server ~]# kubectl get pod
[root@kcloud-server ~]# kubectl delete -f pod.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: liveness-http
  name: liveness-http
spec:
  containers:
  - image: httpd
    name: liveness-http
    ports:
    - containerPort: 80
    livenessProbe:
      httpGet:
        port: 80
      initialDelaySeconds: 15
      failureThreshold: 1
  dnsPolicy: ClusterFirst
  restartPolicy: Always

3. Create the TCP liveness probe

In the kcloud cluster, create a pod named liveness-tcp and configure a TCP probe to monitor pod status, probing the container every 15 seconds with a probe timeout of 2 seconds. When finished, submit the username, password, and public IP address for connecting to the kcloud cluster node to the answer box.

[root@kcloud-server ~]# cat liveness-tcp.yaml
[root@kcloud-server ~]# vim liveness-tcp.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: liveness-tcp
  name: liveness-tcp
spec:
  containers:
  - image: httpd
    name: liveness-tcp
    ports:
    - containerPort: 80
    livenessProbe:
      tcpSocket:
        port: 80
      #initialDelaySeconds: 15
      #failureThreshold: 1
      timeoutSeconds: 2
      periodSeconds: 15
  dnsPolicy: ClusterFirst
  restartPolicy: Always

7. Namespace management

In the /root directory of a kcloud cluster node, write a YAML file my-namespace.yaml with the following requirement:

(1) Namespace name: test.

[root@kcloud-server ~]# kubectl create ns test --dry-run -oyaml > my-namespace.yaml
[root@kcloud-server ~]# kubectl apply -f my-namespace.yaml
[root@kcloud-server ~]# kubectl get ns

8. Service management: ClusterIP

In the /root directory of a kcloud cluster node, write a YAML file service-clusterip.yaml with the following requirements:

(1) Service name: service-clusterip;

(2) Namespace: default;

(3) In-cluster access port: 80; targetPort: 81;

(4) Service type: ClusterIP.

[root@kcloud-server ~]# kubectl create service clusterip service-clusterip --tcp 80:81 --dry-run -oyaml > service-clusterip.yaml
[root@kcloud-server ~]# vim service-clusterip.yaml
[root@kcloud-server ~]# kubectl apply -f service-clusterip.yaml
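The edited file is not shown in the transcript; it would look roughly like this (ClusterIP is the default Service type, so requirement (4) needs no extra change):

apiVersion: v1
kind: Service
metadata:
  labels:
    app: service-clusterip
  name: service-clusterip
spec:
  ports:
  - name: 80-81
    port: 80
    protocol: TCP
    targetPort: 81
  selector:
    app: service-clusterip
  type: ClusterIP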

9. Service management: NodePort

In the /root directory of a kcloud cluster node, write a YAML file service-nodeport.yaml with the following requirements:

(1) Service name: service-nodeport;

(2) Namespace: default;

(3) In-cluster access port: 80; externally exposed port: 30001;

(4) Service type: NodePort.

[root@kcloud-server ~]# kubectl create service nodeport service-nodeport --tcp 80:30001 --dry-run -oyaml > service-nodeport.yaml
[root@kcloud-server ~]# cat service-nodeport.yaml
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: service-nodeport
  name: service-nodeport
spec:
  ports:
  - name: 80-30001
    port: 80
    protocol: TCP
    targetPort: 30001
    nodePort: 30001
  selector:
    app: service-nodeport
  type: NodePort
status:
  loadBalancer: {}
[root@kcloud-server ~]# kubectl apply -f service-nodeport.yaml

10. Secret management: Opaque

In the /root directory of the master node, write a YAML file secret.yaml with the following requirements:

(1) Secret name: mysecret;

(2) Namespace: default;

(3) Type: Opaque;

(4) username=YWRtaW4=; password=MWYyZDFlMmU2N2Rm.

1. Write the values to a file

Decode the base64-encoded strings and write the plaintext into an env file.

[root@kcloud-server ~]# echo YWRtaW4= | base64 -d
admin[root@kcloud-server ~]# vim env-file.txt
[root@kcloud-server ~]# echo MWYyZDFlMmU2N2Rm | base64 -d
1f2d1e2e67df[root@kcloud-server ~]# vim env-file.txt
[root@kcloud-server ~]# cat env-file.txt
username=admin
password=1f2d1e2e67df

2. Create the secret from the file

[root@kcloud-server ~]# kubectl create secret generic --from-env-file env-file.txt mysecret --dry-run -oyaml > secret.yaml
[root@kcloud-server ~]# vim secret.yaml
apiVersion: v1
data:
  password: MWYyZDFlMmU2N2Rm
  username: YWRtaW4=
kind: Secret
metadata:
  creationTimestamp: null
  name: mysecret
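The generated YAML omits type, which is fine: generic secrets default to Opaque. Apply and verify (a sketch):

kubectl apply -f secret.yaml
kubectl get secret mysecret -o jsonpath='{.type}'    # prints Opaque
kubectl get secret mysecret -o jsonpath='{.data.username}' | base64 -d   # prints admin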

11. NetworkPolicy management: deny

In the /root directory of the master node, write a YAML file network-policy-deny.yaml with the following requirements:

(1) NetworkPolicy name: default-deny;

(2) Namespace: default;

(3) Deny all Pod traffic by default.

When finished, create the NetworkPolicy from this YAML file, then submit the master node's username, password, and IP to the answer box.

[root@kcloud-server ~]# vim network-policy-deny.yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: default-deny
  namespace: default
spec:
  policyTypes:
  - Ingress
  podSelector: {}
[root@kcloud-server ~]# kubectl apply -f network-policy-deny.yaml

12. ReplicaSet management

In the /root directory of a kcloud cluster node, write a YAML file replicaset.yaml with the following requirements:

(1) ReplicaSet name: nginx;

(2) Namespace: default;

(3) Replicas: 3;

(4) Image: nginx.

1. Export an existing object as a template

[root@kcloud-server ~]# kubectl get rs -n kube-system coredns-79d6ff7d89 -oyaml > replicaset.yaml

2. Edit the template down to what is needed

[root@kcloud-server ~]# vim replicaset.yaml
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx
        imagePullPolicy: Always
        name: nginx
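Apply the file and watch the three replicas come up (a sketch):

kubectl apply -f replicaset.yaml
kubectl get rs nginx           # DESIRED/CURRENT/READY should reach 3
kubectl get pod -l app=nginx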

15. HPA management

HPA dynamically adjusts the number of Pod replicas based on resource utilization, to absorb traffic bursts.

Collecting resource utilization requires the metrics add-on (metrics-server) for monitoring.

In the /root directory of a kcloud cluster node, write a YAML file hpa.yaml with the following requirements:

(1) HPA name: frontend-scaler;

(2) Namespace: default;

(3) Replica range: 3 to 10;

(4) Target CPU utilization per Pod: 50%.

1. Create a Deployment

Create a Deployment to be the scaling target.

[root@kcloud-server ~]# kubectl create deploy --image nginx --port 80 nginx --dry-run -oyaml > hpa.yaml

2. Create the HPA from the Deployment

[root@kcloud-server ~]# echo '---' >> hpa.yaml
[root@kcloud-server ~]# kubectl autoscale deploy nginx --name frontend-scaler --min 3 --max 10 --cpu-percent=50 --dry-run -oyaml >> hpa.yaml

3. Edit the hpa.yaml file

[root@kcloud-server ~]# vim hpa.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx
        name: nginx
        ports:
        - containerPort: 80
---
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  creationTimestamp: null
  name: frontend-scaler
spec:
  maxReplicas: 10
  minReplicas: 3
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nginx
  targetCPUUtilizationPercentage: 50

4. Deploy

[root@kcloud-server ~]# kubectl apply -f hpa.yaml

5. Observe

The Deployment went from 1 replica to 3 (the minimum replica count the HPA requires).

If the cluster has the metrics add-on and Pod CPU utilization rises above 50%, a new Pod is created.

KubeEdge Edge Computing

1. KubeEdge overview

ServiceBus: receives service requests from the cloud and interacts with edge applications over HTTP

2. Deploy the KubeEdge cloud node

KubeEdge depends on a Kubernetes environment to run; a k8s platform must be built first as the upper layer

Managing an edge node in KubeEdge is as simple as managing an ordinary node in K8s

1. Prepare the base environment

The cloud-side K8s host needs more than 2 vCPUs and 4 GB of RAM

Create two local Ubuntu 22.04 machines, use them to download the deployment packages locally, then upload the packages to the cloud node for deploying K8s and KubeEdge

Log in to the Ubuntu systems as the ubuntu user; the password must be changed on first login

Set recognizable hostnames to make the machines easy to tell apart later, for example:
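(a sketch; these are the hostnames that appear in the prompts later on)

# on the cloud node
hostnamectl set-hostname chinaskill-node-1
# on the edge node
hostnamectl set-hostname chinaskill-node-2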

Download the packages

These downloads are performed on the local Ubuntu machines.

2. Deploy CloudCore

Prepare the files

[root@chinaskill-node-1 ~]# unzip kubernetes1.22_kubeedge-1.1.11.zip
Archive:  kubernetes1.22_kubeedge-1.1.11.zip
  inflating: kubernetes_kubeedge/checksum_kubeedge-v1.11.1-linux-amd64.tar.gz.txt
  inflating: kubernetes_kubeedge/keadm-v1.11.1-linux-amd64.tar.gz
  inflating: kubernetes_kubeedge/kubeedge-1.11.1.tar.gz
  inflating: kubernetes_kubeedge/kubeedge-v1.11.1-linux-amd64.tar.gz
[root@chinaskill-node-1 ~]# ll
-rw-r--r-- 1 root root  108923797 Apr  9 14:59 kubernetes1.22_kubeedge-1.1.11.zip
drwxr-xr-x 2 root root       4096 Apr  9 15:32 kubernetes_kubeedge
[root@chinaskill-node-1 ~]# cd kubernetes_kubeedge
[root@chinaskill-node-1 kubernetes_kubeedge]# ll
total 107216
-rw-r--r-- 1 root root      129 Apr 17  2023 checksum_kubeedge-v1.11.1-linux-amd64.tar.gz.txt
-rw-r--r-- 1 root root 20634490 Apr 17  2023 keadm-v1.11.1-linux-amd64.tar.gz
-rw-r--r-- 1 root root 22559484 Apr 17  2023 kubeedge-1.11.1.tar.gz
-rw-r--r-- 1 root root 66587649 Apr 17  2023 kubeedge-v1.11.1-linux-amd64.tar.gz
[root@chinaskill-node-1 kubernetes_kubeedge]# tar -zxvf keadm-v1.11.1-linux-amd64.tar.gz
keadm-v1.11.1-linux-amd64/
keadm-v1.11.1-linux-amd64/keadm/
keadm-v1.11.1-linux-amd64/keadm/keadm
keadm-v1.11.1-linux-amd64/version
[root@chinaskill-node-1 kubernetes_kubeedge]# ll
total 107220
-rw-r--r-- 1 root root      129 Apr 17  2023 checksum_kubeedge-v1.11.1-linux-amd64.tar.gz.txt
drwxr-xr-x 3 1001  116     4096 Jul 11  2022 keadm-v1.11.1-linux-amd64
-rw-r--r-- 1 root root 20634490 Apr 17  2023 keadm-v1.11.1-linux-amd64.tar.gz
-rw-r--r-- 1 root root 22559484 Apr 17  2023 kubeedge-1.11.1.tar.gz
-rw-r--r-- 1 root root 66587649 Apr 17  2023 kubeedge-v1.11.1-linux-amd64.tar.gz
[root@chinaskill-node-1 kubernetes_kubeedge]# cp -r keadm-v1.11.1-linux-amd64/keadm/keadm /usr/local/bin/
[root@chinaskill-node-1 kubernetes_kubeedge]# mkdir /etc/kubeedge
[root@chinaskill-node-1 kubernetes_kubeedge]# ll
total 107220
-rw-r--r-- 1 root root      129 Apr 17  2023 checksum_kubeedge-v1.11.1-linux-amd64.tar.gz.txt
drwxr-xr-x 3 1001  116     4096 Jul 11  2022 keadm-v1.11.1-linux-amd64
-rw-r--r-- 1 root root 20634490 Apr 17  2023 keadm-v1.11.1-linux-amd64.tar.gz
-rw-r--r-- 1 root root 22559484 Apr 17  2023 kubeedge-1.11.1.tar.gz
-rw-r--r-- 1 root root 66587649 Apr 17  2023 kubeedge-v1.11.1-linux-amd64.tar.gz
[root@chinaskill-node-1 kubernetes_kubeedge]# cp checksum_kubeedge-v1.11.1-linux-amd64.tar.gz.txt kubeedge-1.11.1.tar.gz kubeedge-v1.11.1-linux-amd64.tar.gz /etc/kubeedge
[root@chinaskill-node-1 kubernetes_kubeedge]# tar -zxvf kubeedge-1.11.1.tar.gz
[root@chinaskill-node-1 kubernetes_kubeedge]# ll
total 107224
-rw-r--r--  1 root root      129 Apr 17  2023 checksum_kubeedge-v1.11.1-linux-amd64.tar.gz.txt
drwxr-xr-x  3 1001  116     4096 Jul 11  2022 keadm-v1.11.1-linux-amd64
-rw-r--r--  1 root root 20634490 Apr 17  2023 keadm-v1.11.1-linux-amd64.tar.gz
drwxrwxr-x 20 root root     4096 Jul 11  2022 kubeedge-1.11.1
-rw-r--r--  1 root root 22559484 Apr 17  2023 kubeedge-1.11.1.tar.gz
-rw-r--r--  1 root root 66587649 Apr 17  2023 kubeedge-v1.11.1-linux-amd64.tar.gz
[root@chinaskill-node-1 kubernetes_kubeedge]# cp -r kubeedge-1.11.1/build/crds /etc/kubeedge/

Install

[root@chinaskill-node-1 kubeedge]# keadm deprecated init --advertise-address 47.97.171.61 --kubeedge-version 1.11.1 --tarballpath /etc/kubeedge
Kubernetes version verification passed, KubeEdge installation will start...
keadm will install 1.11 CRDs
Expected or Default KubeEdge version 1.11.1 is already downloaded and will checksum for it.
kubeedge-v1.11.1-linux-amd64.tar.gz checksum:
checksum_kubeedge-v1.11.1-linux-amd64.tar.gz.txt content:
Expected or Default checksum file checksum_kubeedge-v1.11.1-linux-amd64.tar.gz.txt is already downloaded.
Expected or Default KubeEdge version 1.11.1 is already downloaded
keadm will download version 1.11 service file
[Run as service] start to download service file for cloudcore
[Run as service] success to download service file for cloudcore
kubeedge-v1.11.1-linux-amd64/
kubeedge-v1.11.1-linux-amd64/edge/
kubeedge-v1.11.1-linux-amd64/edge/edgecore
kubeedge-v1.11.1-linux-amd64/version
kubeedge-v1.11.1-linux-amd64/cloud/
kubeedge-v1.11.1-linux-amd64/cloud/csidriver/
kubeedge-v1.11.1-linux-amd64/cloud/csidriver/csidriver
kubeedge-v1.11.1-linux-amd64/cloud/iptablesmanager/
kubeedge-v1.11.1-linux-amd64/cloud/iptablesmanager/iptablesmanager
kubeedge-v1.11.1-linux-amd64/cloud/cloudcore/
kubeedge-v1.11.1-linux-amd64/cloud/cloudcore/cloudcore
kubeedge-v1.11.1-linux-amd64/cloud/controllermanager/
kubeedge-v1.11.1-linux-amd64/cloud/controllermanager/controllermanager
kubeedge-v1.11.1-linux-amd64/cloud/admission/
kubeedge-v1.11.1-linux-amd64/cloud/admission/admission
​
KubeEdge cloudcore is running, For logs visit:  /var/log/kubeedge/cloudcore.log
CloudCore started

Check that the ports came up

[root@chinaskill-node-1 kubeedge]# netstat -ntpl
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name
tcp        0      0 0.0.0.0:30080           0.0.0.0:*               LISTEN      9330/kube-proxy
tcp        0      0 0.0.0.0:30081           0.0.0.0:*               LISTEN      9330/kube-proxy
tcp        0      0 0.0.0.0:13379           0.0.0.0:*               LISTEN      9330/kube-proxy
tcp        0      0 127.0.0.1:10248         0.0.0.0:*               LISTEN      8361/kubelet
tcp        0      0 127.0.0.1:10249         0.0.0.0:*               LISTEN      9330/kube-proxy
tcp        0      0 127.0.0.1:35113         0.0.0.0:*               LISTEN      8361/kubelet
tcp        0      0 172.18.64.198:2379      0.0.0.0:*               LISTEN      7984/etcd
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      7984/etcd
tcp        0      0 172.18.64.198:2380      0.0.0.0:*               LISTEN      7984/etcd
tcp        0      0 127.0.0.1:2381          0.0.0.0:*               LISTEN      7984/etcd
tcp        0      0 0.0.0.0:111             0.0.0.0:*               LISTEN      573/rpcbind
tcp        0      0 0.0.0.0:52885           0.0.0.0:*               LISTEN      9330/kube-proxy
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      4727/sshd
tcp        0      0 0.0.0.0:30776           0.0.0.0:*               LISTEN      9330/kube-proxy
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      1098/master
tcp        0      0 0.0.0.0:30779           0.0.0.0:*               LISTEN      9330/kube-proxy
tcp6       0      0 :::10250                :::*                    LISTEN      8361/kubelet
tcp6       0      0 :::6443                 :::*                    LISTEN      7921/kube-apiserver
tcp6       0      0 :::111                  :::*                    LISTEN      573/rpcbind
tcp6       0      0 :::10000                :::*                    LISTEN      22141/cloudcore
tcp6       0      0 :::10256                :::*                    LISTEN      9330/kube-proxy
tcp6       0      0 :::10257                :::*                    LISTEN      7879/kube-controlle
tcp6       0      0 :::10002                :::*                    LISTEN      22141/cloudcore
tcp6       0      0 :::10259                :::*                    LISTEN      7920/kube-scheduler
tcp6       0      0 :::22                   :::*                    LISTEN      4727/sshd
tcp6       0      0 ::1:25                  :::*                    LISTEN      1098/master
tcp6       0      0 :::4443                 :::*                    LISTEN      10368/metrics-serve

Edit the configuration file

[root@chinaskill-node-1 kubeedge]# vim /etc/kubeedge/config/cloudcore.yaml
router:
    address: 0.0.0.0
    enable: true

Stop the default process

[root@chinaskill-node-1 kubeedge]# pkill cloudcore

Manage it with systemctl

[root@chinaskill-node-1 kubeedge]# cp cloudcore.service /etc/systemd/system/
[root@chinaskill-node-1 kubeedge]# systemctl enable --now cloudcore.service
Created symlink from /etc/systemd/system/multi-user.target.wants/cloudcore.service to /etc/systemd/system/cloudcore.service.

3. Deploy the KubeEdge edge node

Install Docker

1. Direct installation (EulerOS)
[root@chinaskill-node-1 kubeedge]# yum repolist
[root@chinaskill-node-1 kubeedge]# yum repolist all
[root@chinaskill-node-1 kubeedge]# yum install -y docker
[root@chinaskill-node-1 kubeedge]# docker -v
[root@chinaskill-node-1 kubeedge]# systemctl start docker
2. Installation from a local YUM repository
[root@chinaskill-node-1 kubeedge]# vi /etc/yum.repos.d/docker.repo
[docker]
name=docker
baseurl=file:///root/yum
gpgcheck=0
enabled=1
# If this errors, check whether stale repositories were left behind
[root@chinaskill-node-1 kubeedge]# yum makecache
[root@chinaskill-node-1 kubeedge]# yum install -y docker-ce
[root@chinaskill-node-1 kubeedge]# systemctl start docker

Install the edge node

1. Locate the keadm binary
[root@chinaskill-node-2 bin]# cd /root/kubeedge
[root@chinaskill-node-2 kubeedge]# ll
total 403M
-rw-r--r-- 1 root root  129 Apr 17  2023 checksum_kubeedge-v1.11.1-linux-amd64.tar.gz.txt
-rw-r--r-- 1 root root  52M Apr 17  2023 cloudcore.tar
-rw-r--r-- 1 root root 191M Apr 17  2023 installation.tar
-rwxr-xr-x 1 1001  116  65M Jul 11  2022 keadm
-rw-r--r-- 1 root root  22M Apr 17  2023 kubeedge-1.11.1.tar.gz
-rw-r--r-- 1 root root  64M Apr 17  2023 kubeedge-v1.11.1-linux-amd64.tar.gz
-rw-r--r-- 1 root root  12M Apr 17  2023 mosquitto.tar
-rw-r--r-- 1 root root 705K Apr 17  2023 pause.tar
[root@chinaskill-node-2 kubeedge]# cp -r keadm /usr/local/bin
2. Load the Docker images
[root@chinaskill-node-2 kubeedge]# docker load -i mosquitto.tar
9733ccc39513: Loading layer [==================================================>]  5.895MB/5.895MB
10c384d6e3d1: Loading layer [==================================================>]  6.256MB/6.256MB
bf05e19817c3: Loading layer [==================================================>]  2.048kB/2.048kB
Loaded image: eclipse-mosquitto:1.6.15
3. Put the files in the required directories
[root@chinaskill-node-2 kubeedge]# mkdir /etc/kubeedge
[root@chinaskill-node-2 kubeedge]# ll
total 403M
-rw-r--r-- 1 root root  129 Apr 17  2023 checksum_kubeedge-v1.11.1-linux-amd64.tar.gz.txt
-rw-r--r-- 1 root root  52M Apr 17  2023 cloudcore.tar
-rw-r--r-- 1 root root 191M Apr 17  2023 installation.tar
-rwxr-xr-x 1 1001  116  65M Jul 11  2022 keadm
-rw-r--r-- 1 root root  22M Apr 17  2023 kubeedge-1.11.1.tar.gz
-rw-r--r-- 1 root root  64M Apr 17  2023 kubeedge-v1.11.1-linux-amd64.tar.gz
-rw-r--r-- 1 root root  12M Apr 17  2023 mosquitto.tar
-rw-r--r-- 1 root root 705K Apr 17  2023 pause.tar
[root@chinaskill-node-2 kubeedge]# cp checksum_kubeedge-v1.11.1-linux-amd64.tar.gz.txt kubeedge-1.11.1.tar.gz kubeedge-v1.11.1-linux-amd64.tar.gz /etc/kubeedge
[root@chinaskill-node-2 kubeedge]# tar -zxvf kubeedge-1.11.1.tar.gz
[root@chinaskill-node-2 kubeedge]# cp -r kubeedge-1.11.1/build/crds /etc/kubeedge
4. Get the token on the cloud node
[root@chinaskill-node-1 kubeedge]# keadm gettoken
5dd5d8a8c6155c66586c6fb9cd3791c55537d8e2924659296d46b5a486205c41.eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3NDQyNzM5MTB9.-WxJHfhey0rpuktweFqupOYA6LcWSkKkVRJ8ciWjwTs
5. Join the edge node
[root@chinaskill-node-2 kubeedge]# keadm deprecated join --cloudcore-ipport 47.97.171.61:10000 --kubeedge-version 1.11.1 --token 5dd5d8a8c6155c66586c6fb9cd3791c55537d8e2924659296d46b5a486205c41.eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3NDQyNzYyNDB9.S8P6IaiY22Q14Tr5HyR9g2GTMJ3uVlwcYULxEFq5PX8 --image-repository docker.io
install MQTT service successfully.
Expected or Default KubeEdge version 1.11.1 is already downloaded and will checksum for it.
kubeedge-v1.11.1-linux-amd64.tar.gz checksum:
checksum_kubeedge-v1.11.1-linux-amd64.tar.gz.txt content:
Expected or Default checksum file checksum_kubeedge-v1.11.1-linux-amd64.tar.gz.txt is already downloaded.
Expected or Default KubeEdge version 1.11.1 is already downloaded
keadm will download version 1.11 service file
[Run as service] service file already exisits in /etc/kubeedge//edgecore.service, skip download
kubeedge-v1.11.1-linux-amd64/
kubeedge-v1.11.1-linux-amd64/edge/
kubeedge-v1.11.1-linux-amd64/edge/edgecore
kubeedge-v1.11.1-linux-amd64/version
kubeedge-v1.11.1-linux-amd64/cloud/
kubeedge-v1.11.1-linux-amd64/cloud/csidriver/
kubeedge-v1.11.1-linux-amd64/cloud/csidriver/csidriver
kubeedge-v1.11.1-linux-amd64/cloud/iptablesmanager/
kubeedge-v1.11.1-linux-amd64/cloud/iptablesmanager/iptablesmanager
kubeedge-v1.11.1-linux-amd64/cloud/cloudcore/
kubeedge-v1.11.1-linux-amd64/cloud/cloudcore/cloudcore
kubeedge-v1.11.1-linux-amd64/cloud/controllermanager/
kubeedge-v1.11.1-linux-amd64/cloud/controllermanager/controllermanager
kubeedge-v1.11.1-linux-amd64/cloud/admission/
kubeedge-v1.11.1-linux-amd64/cloud/admission/admission
6. Check the status
[root@chinaskill-node-2 kubeedge]# systemctl status edgecore
● edgecore.service
   Loaded: loaded (/etc/systemd/system/edgecore.service; enabled; vendor preset: disabled)
   Active: active (running) since Wed 2025-04-09 17:14:24 CST; 4min 42s ago
 Main PID: 6701 (edgecore)
    Tasks: 11
   Memory: 26.6M
   CGroup: /system.slice/edgecore.service
           └─6701 /usr/local/bin/edgecore
​
Apr 09 17:18:50 chinaskill-node-2 edgecore[6701]: I0409 17:18:50.206996    6701 record.go:24] Warning FailedCreatePodSandBox Failed to create pod sandbox: rpc error: code = Unkn...
Apr 09 17:18:50 chinaskill-node-2 edgecore[6701]: E0409 17:18:50.207070    6701 edged.go:1011] worker [0] handle pod addition item [kube-flannel-ds-jztzf] failed: sync...sandbox fo
Apr 09 17:18:55 chinaskill-node-2 edgecore[6701]: W0409 17:18:55.592459    6701 context_channel.go:159] Get bad anonName:ac40112e-d11b-4f77-a260-a4058e6fff26 when send...do nothing
Apr 09 17:19:00 chinaskill-node-2 edgecore[6701]: I0409 17:19:00.200356    6701 edged.go:992] worker [1] get pod addition item [kube-proxy-4j5b8]
Apr 09 17:19:00 chinaskill-node-2 edgecore[6701]: E0409 17:19:00.200421    6701 edged.go:995] consume pod addition backoff: Back-off consume pod [kube-proxy-4j5b8] add...f: [2m40s]
Apr 09 17:19:00 chinaskill-node-2 edgecore[6701]: I0409 17:19:00.200466    6701 edged.go:997] worker [1] backoff pod addition item [kube-proxy-4j5b8] failed, re-add to queue
Apr 09 17:19:00 chinaskill-node-2 edgecore[6701]: I0409 17:19:00.207521    6701 edged.go:992] worker [2] get pod addition item [kube-flannel-ds-jztzf]
Apr 09 17:19:00 chinaskill-node-2 edgecore[6701]: E0409 17:19:00.207549    6701 edged.go:995] consume pod addition backoff: Back-off consume pod [kube-flannel-ds-jztzf...f: [2m40s]
Apr 09 17:19:00 chinaskill-node-2 edgecore[6701]: I0409 17:19:00.207599    6701 edged.go:997] worker [2] backoff pod addition item [kube-flannel-ds-jztzf] failed, re-add to queue
Apr 09 17:19:05 chinaskill-node-2 edgecore[6701]: W0409 17:19:05.618416    6701 context_channel.go:159] Get bad anonName:c4bc73a8-ef1d-4ada-8700-c186bbd26117 when send...do nothing
Hint: Some lines were ellipsized, use -l to show in full.
7. Verify on the cloud node
[root@chinaskill-node-1 kubeedge]# kubectl get nodes
NAME                STATUS   ROLES                         AGE    VERSION
default-edge-node   Ready    agent,edge                    2m7s   v1.22.6-kubeedge-v1.11.1
k8s-master-node1    Ready    control-plane,master,worker   128m   v1.22.1
8. Fix for a possible error
vi /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=cgroupfs"]
}
systemctl daemon-reload
systemctl restart docker
