Copy mongodb-linux-x86_64-4.0.4.tgz to chinaskill-sql-1 and chinaskill-sql-2.
[root@chinaskill-sql-1 ~]# tar -zxf mongodb-linux-x86_64-4.0.4.tgz -C /usr/local
[root@chinaskill-sql-1 ~]# cd /usr/local
[root@chinaskill-sql-1 local]# mv mongodb-linux-x86_64-4.0.4/ mongodb
[root@chinaskill-sql-1 local]# vi /etc/profile
export PATH=$PATH:/usr/local/mongodb/bin
[root@chinaskill-sql-1 local]# source /etc/profile
[root@chinaskill-sql-1 local]# mkdir -p /data/db
[root@chinaskill-sql-1 local]# mongo --version
[root@chinaskill-sql-2 ~]# tar -zxf mongodb-linux-x86_64-4.0.4.tgz -C /usr/local
[root@chinaskill-sql-2 ~]# cd /usr/local
[root@chinaskill-sql-2 local]# mv mongodb-linux-x86_64-4.0.4/ mongodb
[root@chinaskill-sql-2 local]# vi /etc/profile
export PATH=$PATH:/usr/local/mongodb/bin
[root@chinaskill-sql-2 local]# source /etc/profile
[root@chinaskill-sql-2 local]# mkdir -p /data/db
[root@chinaskill-sql-2 local]# mongo --version
[root@chinaskill-sql-1 local]# vi /usr/local/mongodb/mongod.conf
bind_ip=0.0.0.0
replSet=rs
[root@chinaskill-sql-1 local]# mongod -f /usr/local/mongodb/mongod.conf &
[root@chinaskill-sql-2 local]# vi /usr/local/mongodb/mongod.conf
bind_ip=0.0.0.0
replSet=rs
[root@chinaskill-sql-2 local]# mongod -f /usr/local/mongodb/mongod.conf &
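Backgrounding mongod with '&' works, but the legacy config format can also daemonize the process itself; a minimal sketch of the extra mongod.conf lines (logpath becomes mandatory once fork is enabled):
fork=true
logpath=/usr/local/mongodb/mongod.log
logappend=true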
[root@chinaskill-sql-1 local]# mongo
> rs.status()
{
    "operationTime" : Timestamp(0, 0),
    "ok" : 0,
    "errmsg" : "no replset config has been received",
    "code" : 94,
    "codeName" : "NotYetInitialized",
    "$clusterTime" : {
        "clusterTime" : Timestamp(0, 0),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}
> var config={_id:"rs",members:[{_id:0,host:"172.16.2.76:27017"},{_id:1,host:"172.16.2.45:27017"}]}
> rs.initiate(config)
……
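The election usually completes within a few seconds; to wait for it from a script, a sketch that polls rs.status() until this node reports PRIMARY (myState == 1):
mongo --quiet --eval 'while (rs.status().myState !== 1) { sleep(1000) }; print("PRIMARY elected")'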
rs:PRIMARY> rs.status()
{
    "set" : "rs",
    "date" : ISODate("2022-09-08T06:49:13.414Z"),
    "myState" : 1,
    "term" : NumberLong(1),
    "syncingTo" : "",
    "syncSourceHost" : "",
    "syncSourceId" : -1,
    "heartbeatIntervalMillis" : NumberLong(2000),
    "optimes" : {
        "lastCommittedOpTime" : {
            "ts" : Timestamp(1662619748, 1),
            "t" : NumberLong(1)
        },
        "readConcernMajorityOpTime" : {
            "ts" : Timestamp(1662619748, 1),
            "t" : NumberLong(1)
        },
        "appliedOpTime" : {
            "ts" : Timestamp(1662619748, 1),
            "t" : NumberLong(1)
        },
        "durableOpTime" : {
            "ts" : Timestamp(1662619748, 1),
            "t" : NumberLong(1)
        }
    },
    "lastStableCheckpointTimestamp" : Timestamp(1662619698, 1),
    "members" : [
        {
            "_id" : 0,
            "name" : "172.16.2.76:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 284,
            "optime" : {
                "ts" : Timestamp(1662619748, 1),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2022-09-08T06:49:08Z"),
            "syncingTo" : "",
            "syncSourceHost" : "",
            "syncSourceId" : -1,
            "infoMessage" : "could not find member to sync from",
            "electionTime" : Timestamp(1662619696, 1),
            "electionDate" : ISODate("2022-09-08T06:48:16Z"),
            "configVersion" : 1,
            "self" : true,
            "lastHeartbeatMessage" : ""
        },
        {
            "_id" : 1,
            "name" : "172.16.2.45:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 68,
            "optime" : {
                "ts" : Timestamp(1662619748, 1),
                "t" : NumberLong(1)
            },
            "optimeDurable" : {
                "ts" : Timestamp(1662619748, 1),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2022-09-08T06:49:08Z"),
            "optimeDurableDate" : ISODate("2022-09-08T06:49:08Z"),
            "lastHeartbeat" : ISODate("2022-09-08T06:49:12.328Z"),
            "lastHeartbeatRecv" : ISODate("2022-09-08T06:49:12.582Z"),
            "pingMs" : NumberLong(0),
            "lastHeartbeatMessage" : "",
            "syncingTo" : "172.16.2.76:27017",
            "syncSourceHost" : "172.16.2.76:27017",
            "syncSourceId" : 0,
            "infoMessage" : "",
            "configVersion" : 1
        }
    ],
    "ok" : 1,
    "operationTime" : Timestamp(1662619748, 1),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1662619748, 1),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}
rs:PRIMARY> exit
bye
[root@chinaskill-node-1 ~]# mkdir /usr/local/nodejs
[root@chinaskill-node-1 ~]# tar -Jxf node-v12.16.1-linux-x64.tar.xz -C /usr/local/nodejs
[root@chinaskill-node-1 ~]# vi /etc/profile
export NODE_PATH=/usr/local/nodejs/node-v12.16.1-linux-x64
export PATH=$PATH:$NODE_PATH/bin
[root@chinaskill-node-1 ~]# source /etc/profile
[root@chinaskill-node-1 ~]# node -v
v12.16.1
[root@chinaskill-node-1 ~]# yum install gcc-c++
[root@chinaskill-node-1 ~]# yum groupinstall 'Development Tools'
[root@chinaskill-node-1 ~]# tar -zxf rocket.chat.tgz -C /tmp
[root@chinaskill-node-1 ~]# cd /tmp/bundle/programs/server
[root@chinaskill-node-1 server]# npm install
[root@chinaskill-node-1 server]# mv /tmp/bundle /opt/Rocket.Chat
[root@chinaskill-node-1 server]# cd /opt/Rocket.Chat/
[root@chinaskill-node-1 Rocket.Chat]# useradd -M rocketchat
[root@chinaskill-node-1 Rocket.Chat]# usermod -L rocketchat
[root@chinaskill-node-1 Rocket.Chat]# chown -R rocketchat:rocketchat /opt/Rocket.Chat
[root@chinaskill-node-1 Rocket.Chat]# vi /lib/systemd/system/rocketchat.service
[Unit]
Description=The Rocket.Chat server running on Linux
After=network.target remote-fs.target nss-lookup.target nginx.service mongod.service
[Service]
ExecStart=/usr/local/nodejs/node-v12.16.1-linux-x64/bin/node /opt/Rocket.Chat/main.js
StandardOutput=syslog
StandardError=syslog
SyslogIdentifier=rocketchat
User=rocketchat
Environment=ROOT_URL=http://localhost:3000
Environment=PORT=3000
Environment=MONGO_URL=mongodb://172.16.2.76:27017/rocketchat?replicaSet=rs
Environment=MONGO_OPLOG_URL=mongodb://172.16.2.76:27017/local?replicaSet=rs
[Install]
WantedBy=multi-user.target
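If the unit file is new or was edited, systemd should re-read its configuration before the service is enabled:
[root@chinaskill-node-1 Rocket.Chat]# systemctl daemon-reload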
[root@chinaskill-node-1 Rocket.Chat]# systemctl enable rocketchat
[root@chinaskill-node-1 Rocket.Chat]# systemctl start rocketchat
[root@chinaskill-node-1 Rocket.Chat]# systemctl status rocketchat
● rocketchat.service - The Rocket.Chat server running on Linux
   Loaded: loaded (/usr/lib/systemd/system/rocketchat.service; enabled; vendor preset: disabled)
   Active: active (running) since Thu 2022-09-08 15:14:40 CST; 6s ago
 Main PID: 9168 (node)
   CGroup: /system.slice/rocketchat.service
           └─9168 /usr/local/nodejs/node-v12.16.1-linux-x64/bin/node /opt/Rocket.Chat/main.js
Sep 08 15:14:40 chinaskill-node-1 systemd[1]: Started The Rocket.Chat server running on Linux.
[root@chinaskill-sql-1 local]# mongo
rs:PRIMARY> show dbs
admin 0.000GB
config 0.000GB
local 0.001GB
rocketchat 0.002GB
rs:PRIMARY> use rocketchat
switched to db rocketchat
rs:PRIMARY> show collections
……
[root@chinaskill-sql-2 local]# mongo
rs:SECONDARY> db.getMongo().setSlaveOk()
rs:SECONDARY> show dbs
admin 0.000GB
config 0.000GB
local 0.001GB
rocketchat 0.002GB
rs:SECONDARY> exit
bye
http://121.37.137.27:3000
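A quick reachability check from the command line (assuming port 3000 is open in the security group):
curl -I http://121.37.137.27:3000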
tar -zxvf rocketchat-cloud.tar.gz -C /opt
vim /etc/yum.repos.d/yum.repo
[local]
name=local
baseurl=file:///opt/yum.repo
gpgcheck=0
enabled=1
yum install -y mongodb-org
mongo --version
systemctl enable mongod --now
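A short sanity check that mongod is up and answering (standard mongo shell usage):
systemctl status mongod
mongo --eval 'db.runCommand({ ping: 1 })'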
【Task 4】
1. Edit the MongoDB configuration file
vim /etc/mongod.conf
...
# network interfaces
net:
  port: 27017
  bindIp: 127.0.0.1   # change bindIp to 0.0.0.0 here so the members can reach each other during initialization
...
# uncomment the replication section:
replication:
  replSetName: rs   # set the replica set name
2. Initialize the replica set
mongo   # open the shell to run the initialization
# First define the replica set config as a JSON-style variable (eth0 addresses of the two nodes)
> var rs_conf={"_id":"rs","members":[{"_id":0,"host":"172.16.1.125:27017"}, {"_id":1,"host":"172.16.1.7:27017"}]}
# Initialize the rs replica set with the config defined above
> rs.initiate(rs_conf)
# Output containing "ok" : 1 indicates success
3. Check the replica set status
rs:SECONDARY> rs.status()
【Task 5】
1. Install Node.js from the yum repository
yum install -y nodejs
node --version
【Task 7】 Deploying to the cloud
1. Extract the Rocket.Chat archive to the staging directory /tmp
tar xvf rocket.chat.tgz -C /tmp/
2. Install build dependencies
yum install -y gcc-c++ make GraphicsMagick
3. Install the npm dependencies
# Use a China-local npm mirror
npm config set registry https://registry.npmmirror.com/
npm config set ELECTRON_MIRROR https://cdn.npmmirror.com/dist/electron/
# Install the dependencies
cd /tmp/bundle/programs/server && sudo npm install
4. Move the bundle to its target directory and create a rocketchat user to own it
mv /tmp/bundle /opt/Rocket.chat
useradd -M rocketchat && sudo usermod -L rocketchat
chown -R rocketchat:rocketchat /opt/Rocket.chat
5. Configure rocketchat.service so the app can be managed as a service, setting the required environment variables.
cd /opt/Rocket.chat
export MONGO_URL=mongodb://172.16.1.125:27017/rocketchat?replicaSet=rs    # primary node IP
export MONGO_OPLOG_URL=mongodb://172.16.2.168:27017/local?replicaSet=rs   # primary node IP
export ROOT_URL=http://172.16.2.59:3000/                                  # node host IP
export PORT=3000
6. Start the service
cd /opt/Rocket.chat
node main.js
systemctl enable rocketchat && sudo systemctl start rocketchat
Download the kubectl configuration file.
mkdir -p $HOME/.kube
Copy kubeconfig.json to the node.
mv -f kubeconfig.json $HOME/.kube/config
kubectl config use-context internal
kubectl cluster-info
kubectl get nodes
Upload helm-v3.3.0-linux-amd64.tar.gz to the cloud host.
tar -zxf helm-v3.3.0-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin
helm version
tar -zxf mychart-0.1.0.tgz
cd mychart
vi values.yaml
service:
  type: NodePort
helm package .
helm install nginx mychart-0.1.0.tgz
helm list
helm uninstall nginx
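To inspect what the chart renders without touching the cluster, Helm 3 can also template the packaged archive locally:
helm template nginx mychart-0.1.0.tgz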
Copy /root/mychart/mychart-0.1.0.tgz to the local machine.
Upload the template.
Install the template.
tar -zxf mariadb-7.3.14.tgz
cd mariadb
vi values.yaml
service:
  ## Kubernetes service type, ClusterIP and NodePort are supported at present
  type: NodePort
  nodePort:
    master: 32334
rootUser:
  ## MariaDB admin password
  ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-the-root-password-on-first-run
  ##
  password: "chinaskill"
persistence:
  ## If true, use a Persistent Volume Claim, If false, use emptyDir
  ##
  enabled: false   # this block occurs twice in values.yaml (master and slave sections); set both
docker load -i bitnami_mariadb-10.3.22-debian-10-r27.tar
helm install mariadb .
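To confirm the release and the fixed NodePort (32334), the usual checks are (the service name here follows the release name):
helm status mariadb
kubectl get svc mariadb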
Application deployment: MariaDB
kubectl create ns mariadb
mv mariadb mariadb10
tar -zxf mariadb-7.3.14.tgz
cd mariadb
vi values.yaml
service:
  ## Kubernetes service type, ClusterIP and NodePort are supported at present
  type: NodePort
db:
  ## MariaDB username and password
  ## ref: https://github.com/bitnami/bitnami-docker-mariadb#creating-a-database-user-on-first-run
  ##
  user: "chinaskillroot"
  password: "chinaskillpassword"
  ## Database to create
  ## ref: https://github.com/bitnami/bitnami-docker-mariadb#creating-a-database-on-first-run
  ##
  name: chinaskill
persistence:
  ## If true, use a Persistent Volume Claim, If false, use emptyDir
  ##
  enabled: false   # this block occurs twice in values.yaml (master and slave sections); set both
helm install -n mariadb mariadb .
helm list -A
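A hypothetical connectivity check with the credentials from values.yaml; <node-ip> and <node-port> must be substituted with the real NodePort endpoint:
mysql -h <node-ip> -P <node-port> -u chinaskillroot -pchinaskillpassword chinaskill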
tar -zxf wordpress-13.0.23.tgz
cd wordpress
vi values.yaml
service:
  ## @param service.type WordPress service type
  ##
  type: NodePort
readinessProbe:
  enabled: false
Check the persistence: settings:
mariadb:
  persistence:
    enabled: false
docker load -i bitnami_wordpress-5.9.2-debian-10-r0.tar
docker load -i bitnami_mariadb-10.5.15-debian-10-r11.tar
mkdir /wp
chmod 777 /wp
vi pv.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-wp
  labels:
    name: pv-wp
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: /wp
kubectl apply -f pv.yaml
helm install wordpress .
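It is worth confirming that the claim bound to pv-wp and that the pods started:
kubectl get pv,pvc
kubectl get pods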
mkdir -p /data/charts
chmod -R 777 /data/charts/
kubectl create ns chartmuseum
vi chartmuseum.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: chartmuseum
  name: chartmuseum
  namespace: chartmuseum
spec:
  replicas: 1
  selector:
    matchLabels:
      app: chartmuseum
  template:
    metadata:
      labels:
        app: chartmuseum
    spec:
      containers:
      - image: chartmuseum/chartmuseum:latest
        imagePullPolicy: IfNotPresent
        name: chartmuseum
        ports:
        - containerPort: 8080
          protocol: TCP
        env:
        - name: DEBUG
          value: "1"
        - name: STORAGE
          value: local
        - name: STORAGE_LOCAL_ROOTDIR
          value: /charts
        volumeMounts:
        - mountPath: /charts
          name: charts-volume
      volumes:
      - name: charts-volume
        hostPath:
          path: /data/charts
          type: Directory
---
apiVersion: v1
kind: Service
metadata:
  name: chartmuseum
  namespace: chartmuseum
spec:
  ports:
  - port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    app: chartmuseum
kubectl apply -f chartmuseum.yaml
kubectl get svc -n chartmuseum
curl 10.106.178.78:8080
helm repo add chartmuseum http://10.106.178.78:8080
[root@master ~]# helm repo list
NAME URL
chartmuseum http://10.106.178.78:8080
[root@master ~]# curl --data-binary "@wordpress-13.0.23.tgz" http://10.106.178.78:8080/api/charts
{"saved":true}
[root@master ~]# curl http://10.106.178.78:8080/api/charts
{"wordpress":[{"name":"wordpress","home":"https://github.com/bitnami/charts/tree/master/bitnami/wordpress","sources":["https://github.com/bitnami/bitnami-docker-wordpress","https://wordpress.org/"],"version":"13.0.23","description":"WordPress is the world's most popular blogging and content management platform. Powerful yet simple, everyone from students to global corporations use it to build beautiful, functional websites.","keywords":["application","blog","cms","http","php","web","wordpress"],"maintainers":[{"name":"Bitnami","email":"containers@bitnami.com"}],"icon":"https://bitnami.com/assets/stacks/wordpress/img/wordpress-stack-220x234.png","apiVersion":"v2","appVersion":"5.9.2","annotations":{"category":"CMS"},"dependencies":[{"name":"memcached","version":"6.x.x","repository":"https://charts.bitnami.com/bitnami","condition":"memcached.enabled"},{"name":"mariadb","version":"10.x.x","repository":"https://charts.bitnami.com/bitnami","condition":"mariadb.enabled"},{"name":"common","version":"1.x.x","repository":"https://charts.bitnami.com/bitnami","tags":["bitnami-common"]}],"urls":["charts/wordpress-13.0.23.tgz"],"created":"2022-09-11T10:00:20.391057325Z","digest":"a4b42912909775eec74ec0cab4c021692bbf8234ead837f2cae3824f0762b228"}]}
helm repo update
helm install wordpress chartmuseum/wordpress
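To confirm the chart is now being served from the ChartMuseum repo, it can also be searched:
helm search repo chartmuseum/wordpress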
yum groupinstall 'Development Tools'
yum install glibc-static
tar -zxf chkrootkit.tar.gz
cd chkrootkit
make sense
mkdir /var/log/chkrootkit
./chkrootkit > /var/log/chkrootkit/chkrootkit.log
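To repeat the scan on a schedule, a cron entry is one option; a sketch assuming the sources stay under /root/chkrootkit:
echo '0 3 * * * root /root/chkrootkit/chkrootkit > /var/log/chkrootkit/chkrootkit.log 2>&1' >> /etc/crontab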
https://elk-docker.readthedocs.io/
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
sysctl -p
docker load -i sebp_elk-7.16.3.tar
docker run -d --name elk --restart always -p 5601:5601 -p 9200:9200 -p 5044:5044 -e TZ=Asia/Shanghai sebp/elk:7.16.3
docker exec -it elk bash
/opt/logstash/bin/logstash --path.data /tmp/logstash/data -e 'input { stdin { } } output { elasticsearch { hosts => ["localhost"] } }'
this is a dummy entry
Ctrl+C
http://192.168.100.79:9200/_search?pretty&size=1000
http://192.168.100.79:5601
rpm -ivh filebeat-7.16.3-x86_64.rpm
mv /etc/filebeat/filebeat.yml /etc/filebeat/filebeat.yml.bak
rm -f /etc/filebeat/filebeat.yml
vi /etc/filebeat/filebeat.yml
output:
  logstash:
    enabled: true
    hosts:
      - 192.168.100.79:5044
    timeout: 15
    ssl:
      certificate_authorities:
        - /etc/pki/tls/certs/logstash-beats.crt
filebeat:
  inputs:
    -
      paths:
        - /var/log/messages
      document_type: syslog
vi /etc/pki/tls/certs/logstash-beats.crt
-----BEGIN CERTIFICATE-----
MIIC6zCCAdOgAwIBAgIJANPZwuf+5wTLMA0GCSqGSIb3DQEBCwUAMAwxCjAIBgNV
BAMMASowHhcNMTUxMjI4MTA0NTMyWhcNMjUxMjI1MTA0NTMyWjAMMQowCAYDVQQD
DAEqMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp+jHFvhyYKiPXc7k
0c33f2QV+1hHNyW/uwcJbp5jG82cuQ41v70Z1+b2veBW4sUlDY3yAIEOPSUD8ASt
9m72CAo4xlwYKDvm/Sa3KJtDk0NrQiz6PPyBUFsY+Bj3xn6Nz1RW5YaP+Q1Hjnks
PEyQu4vLgfTSGYBHLD4gvs8wDWY7aaKf8DfuP7Ov74Qlj2GOxnmiDEF4tirlko0r
qQcvBgujCqA7rNoG+QDmkn3VrxtX8mKF72bxQ7USCyoxD4cWV2mU2HD2Maed3KHj
KAvDAzSyBMjI+qi9IlPN5MR7rVqUV0VlSKXBVPct6NG7x4WRwnoKjTXnr3CRADD0
4uvbQQIDAQABo1AwTjAdBgNVHQ4EFgQUVFurgDwdcgnCYxszc0dWMWhB3DswHwYD
VR0jBBgwFoAUVFurgDwdcgnCYxszc0dWMWhB3DswDAYDVR0TBAUwAwEB/zANBgkq
hkiG9w0BAQsFAAOCAQEAaLSytepMb5LXzOPr9OiuZjTk21a2C84k96f4uqGqKV/s
okTTKD0NdeY/IUIINMq4/ERiqn6YDgPgHIYvQheWqnJ8ir69ODcYCpsMXIPau1ow
T8c108BEHqBMEjkOQ5LrEjyvLa/29qJ5JsSSiULHvS917nVgY6xhcnRZ0AhuJkiI
ARKXwpO5tqJi6BtgzX/3VDSOgVZbvX1uX51Fe9gWwPDgipnYaE/t9TGzJEhKwSah
kNr+7RM+Glsv9rx1KcWcx4xxY3basG3/KwvsGAFPvk5tXbZ780VuNFTTZw7q3p8O
Gk1zQUBOie0naS0afype5qFMPp586SF/2xAeb68gLg==
-----END CERTIFICATE-----
systemctl start filebeat
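Once filebeat is running, its events should land in Elasticsearch; a quick look at the indices via the standard cat API:
curl -s 'http://192.168.100.79:9200/_cat/indices?v'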
Extract docker-repo.tar.gz
[root@localhost ~]# tar -zxvf docker-repo.tar.gz -C /opt/
Configure the Docker repository source
[root@localhost ~]# cd /etc/yum.repos.d/
[root@localhost yum.repos.d]# mv * /home/
[root@localhost yum.repos.d]# cat local.repo
[centos]
name=centos
baseurl=http://172.16.100.216/centos
gpgcheck=0
enabled=1
[Docker]
name=Docker
baseurl=file:///opt/docker-repo
gpgcheck=0
enabled=1
Clean out the old repo metadata
[root@localhost yum.repos.d]# yum clean all
Loaded plugins: fastestmirror
Cleaning repos: Docker
Rebuild the metadata cache
[root@localhost yum.repos.d]# yum makecache
Loaded plugins: fastestmirror
Determining fastest mirrors
Docker | 3.0 kB 00:00:00
centos | 3.6 kB 00:00:00
(1/7): Docker/filelists_db | 138 kB 00:00:00
(2/7): Docker/primary_db | 161 kB 00:00:00
(3/7): Docker/other_db | 80 kB 00:00:00
(4/7): centos/group_gz | 153 kB 00:00:00
(5/7): centos/filelists_db | 3.3 MB 00:00:00
(6/7): centos/primary_db | 3.3 MB 00:00:00
(7/7): centos/other_db | 1.3 MB 00:00:00
Metadata Cache Created
Install Docker
[root@localhost ~]# yum install -y docker-ce
Start Docker
[root@localhost ~]# systemctl enable docker --now
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
Load the image
[root@localhost ~]# docker load -i sepb_elk_latest.tar
Start the container
First adjust the kernel parameter, otherwise the container fails to start.
[root@localhost ~]# vim /etc/sysctl.conf
Add:
vm.max_map_count = 262144
Apply:
[root@localhost ~]# sysctl -p
vm.max_map_count = 262144
Start the container:
[root@localhost ~]# docker run -it -d -p 5601:5601 -p 9200:9200 -p 5044:5044 --name elk -e ES_MIN_MEM=512m -e ES_MAX_MEM=1024m sebp/elk:latest
Access in a browser:
http://IP:5601 (on Huawei Cloud, remember to open the security group)
Install filebeat and configure the connection to Elasticsearch
Install:
[root@localhost ~]# yum install -y filebeat-7.13.2-x86_64.rpm
Configure the connection (the default output already points at Elasticsearch, so the output section can stay unchanged).
Edit the configuration file:
[root@localhost ~]# vi /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/yum.log
output.elasticsearch:
  hosts: ["localhost:9200"]
Start filebeat
Enable at boot and start now:
[root@localhost ~]# systemctl enable filebeat --now
Created symlink from /etc/systemd/system/multi-user.target.wants/filebeat.service to /usr/lib/systemd/system/filebeat.service.
Check the status:
[root@localhost ~]# systemctl status filebeat
● filebeat.service - Filebeat sends log files to Logstash or directly to Elasticsearch.
   Loaded: loaded (/usr/lib/systemd/system/filebeat.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2022-11-28 04:20:52 EST; 1min 1s ago
     Docs: https://www.elastic.co/beats/filebeat
 Main PID: 12835 (filebeat)
    Tasks: 9
   Memory: 124.6M
   CGroup: /system.slice/filebeat.service
           └─12835 /usr/share/filebeat/bin/filebeat --environment systemd -c /etc/filebeat/filebeat.yml --path.home /usr/share/filebeat --path.config /etc...
Click through to the next step in Kibana and the data shows up.
Now install httpd to generate a fresh log entry:
[root@localhost ~]# yum install -y httpd
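The install appends to /var/log/yum.log, which filebeat ships to Elasticsearch; a hedged query for the new event (the index pattern assumes the filebeat 7.x default):
curl -s 'http://localhost:9200/filebeat-*/_search?q=httpd&pretty'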
Write the cce_cluster_manager.py file as follows:
import time

from huaweicloudsdkcore.auth.credentials import BasicCredentials
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcce.v3 import *
from huaweicloudsdkcce.v3.region.cce_region import CceRegion


def get_aksk_v3_cce_token():
    # Build a CCE client authenticated with your AK/SK (fill in your own keys)
    ak = ""
    sk = ""
    token = BasicCredentials(ak, sk)
    client = CceClient.new_builder() \
        .with_credentials(token) \
        .with_region(CceRegion.value_of("cn-north-4")) \
        .build()
    return client


class Cluster_Manager:
    def __init__(self, client):
        self.client = client

    def delete_same_cluster(self, name):
        # Delete any existing cluster with the given name, including its attached resources
        client = self.client
        req = ListClustersRequest()
        resp = client.list_clusters(req)
        for cluster in resp.items:
            if cluster.metadata.name == name:
                clusterId = cluster.metadata.uid
                delete_req = DeleteClusterRequest()
                delete_req.cluster_id = clusterId
                delete_req.delete_efs = "true"
                delete_req.delete_evs = "true"
                delete_req.delete_net = "true"
                delete_req.delete_obs = "true"
                delete_req.delete_sfs = "true"
                delete_resp = client.delete_cluster(delete_req)
        return ""

    def create_cluster(self):
        # Create a small VM-based CCE cluster named chinaskillscce2022
        client = self.client
        req = CreateClusterRequest()
        metadataBody = ClusterMetadata(
            name="chinaskillscce2022"
        )
        hostNetwork = HostNetwork(
            vpc="",       # fill in your VPC ID
            subnet="",    # fill in your subnet ID
        )
        Cidrs = [
            ContainerCIDR(
                cidr="10.0.0.0/16"
            )
        ]
        containetNetwork = ContainerNetwork(
            mode="vpc-router",
            cidrs=Cidrs,
        )
        SpecBody = ClusterSpec(
            category="CCE",
            type="VirtualMachine",
            flavor="cce.s1.small",
            version="v1.21",
            host_network=hostNetwork,
            container_network=containetNetwork,
            kube_proxy_mode="iptables",
        )
        req.body = Cluster(
            kind="Cluster",
            api_version="v3",
            metadata=metadataBody,
            spec=SpecBody,
        )
        result = client.create_cluster(req)

    def list_cluster(self):
        # List available VM clusters running v1.21
        client = self.client
        req = ListClustersRequest()
        req.detail = "true"
        req.status = "Available"
        req.type = "VirtualMachine"
        req.version = "v1.21"
        result = client.list_clusters(req)
        print(result)

    def get_clusterId_by_name(self, name):
        # Look up a cluster's UID by its name
        client = self.client
        req = ListClustersRequest()
        resp = client.list_clusters(req)
        for cluster in resp.items:
            if cluster.metadata.name == name:
                clusterId = cluster.metadata.uid
                return clusterId

    def show_cluster(self, clusterId):
        # Print the details of a single cluster
        client = self.client
        req = ShowClusterRequest()
        req.detail = "true"
        req.cluster_id = clusterId
        result = client.show_cluster(req)
        print(result)


if __name__ == "__main__":
    cluster_m = Cluster_Manager(get_aksk_v3_cce_token())
    # delete_same = cluster_m.delete_same_cluster("chinaskillscce2022")
    # time.sleep(240)
    create_cluster = cluster_m.create_cluster()
    # time.sleep(480)
    # list_cluster = cluster_m.list_cluster()
    clusterId = cluster_m.get_clusterId_by_name("chinaskillscce2022")
    show_cluster = cluster_m.show_cluster(clusterId)
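The script relies on the Huawei Cloud Python SDK; assuming pip is available, the required packages install with:
pip install huaweicloudsdkcore huaweicloudsdkcce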