update
998-常用脚本/a-部署脚本/deploy-nfs-server.sh (new file, 23 lines)
@@ -0,0 +1,23 @@
#!/bin/bash

# Directory exported over NFS (the Docker-local path is kept as a commented-out alternative).
#nfs_data_path="/var/lib/docker/nfs_data"
nfs_data_path="/data/nfs_data"

deploy_nfs_server(){
    mkdir -p "${nfs_data_path}"
    chmod 777 "${nfs_data_path}"

    # Export the directory to all clients; note this appends a new line on every run.
    echo "${nfs_data_path} *(rw,no_root_squash,no_all_squash,sync)" >> /etc/exports

    systemctl restart rpcbind
    systemctl restart nfs-server

    systemctl enable rpcbind
    systemctl enable nfs-server
}

deploy_nfs_server

# docker login -u admin -p V2ryStr@ngPss 10.100.2.121:8033
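A quick way to confirm the export took effect (a sketch, assuming it runs on the NFS server itself and that showmount is available from nfs-utils; the client IP placeholder is illustrative):

exportfs -v                       # exports the running server actually serves
showmount -e localhost
# on a client, a trial mount; replace <nfs-server-ip> with the server address
mount -t nfs <nfs-server-ip>:/data/nfs_data /mnt && umount /mnt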
998-常用脚本/a-部署脚本/install_minio.sh (new file, 114 lines)
@@ -0,0 +1,114 @@
#!/bin/bash

minio_local_path=/var/lib/docker/minio-pv/pv1
harbor_host=192.168.233.100:8033
inner_master_ip=192.168.233.100
minio_host_ip=192.168.233.100

install_minio(){
    docker login -u admin -p V2ryStr@ngPss ${harbor_host}

    echo "start to create minio local path !"
    mkdir -p ${minio_local_path}
    chmod -R 777 ${minio_local_path}
    mkdir -p /root/wdd/install/

    cat > /root/wdd/install/minio-docker-compose.yaml <<EOF
version: '2'
services:
  minio:
    image: ${harbor_host}/cmii/minio:RELEASE.2023-06-02T23-17-26Z
    container_name: live-minio
    ports:
      - "9000:9000"
      - "9001:9001"
    command: 'server /data --console-address ":9001"'
    restart: always
    environment:
      MINIO_ACCESS_KEY: cmii
      MINIO_SECRET_KEY: B#923fC7mk
    volumes:
      - ${minio_local_path}:/data
EOF

    echo "start minio container !"
    docker-compose -f /root/wdd/install/minio-docker-compose.yaml up -d
    echo ""
}

install_docker_compose(){
    curl https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/docker-compose-amd64 -o /usr/local/bin/docker-compose
    chmod +x /usr/local/bin/docker-compose
}

init_minio(){
    echo "start to download mc!"
    if [[ ! -f /usr/local/bin/mc ]]; then
        curl https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/mc -o /usr/local/bin/mc
        chmod +x /usr/local/bin/mc
    fi

    # curl https://dl.min.io/client/mc/release/linux-amd64/mc -o /usr/local/bin/mc

    echo ""
    sleep 5

    export tenant_name=outside
    mc alias set ${tenant_name} http://${minio_host_ip}:9000 cmii B#923fC7mk

    # Create the buckets used by the platform.
    mc mb ${tenant_name}/jadenq ${tenant_name}/tus ${tenant_name}/thumbnail ${tenant_name}/pub-cms ${tenant_name}/live-srs-hls/ ${tenant_name}/mission/ ${tenant_name}/surveillance ${tenant_name}/playback ${tenant_name}/tower ${tenant_name}/modelprocess ${tenant_name}/srs-hls ${tenant_name}/live-cluster-hls ${tenant_name}/geodata
    echo ""

    echo "set rabbit mq"
    mc admin config set ${tenant_name} notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@${inner_master_ip}:35672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"
    echo ""

    echo "sleep 5 s!"
    sleep 5

    # Restart MinIO so the AMQP notification target takes effect.
    mc admin service restart ${tenant_name}

    echo "sleep 5 s!"
    sleep 5
    echo ""

    echo "start to add event notification !"
    mc event add ${tenant_name}/mission arn:minio:sqs::1:amqp --event put
    mc event add ${tenant_name}/modelprocess arn:minio:sqs::1:amqp --event put
    mc event add ${tenant_name}/live-srs-hls arn:minio:sqs::1:amqp --event put
    mc event add ${tenant_name}/playback arn:minio:sqs::1:amqp --event put
    mc event add ${tenant_name}/live-cluster-hls arn:minio:sqs::1:amqp --event put
    mc event add ${tenant_name}/geodata arn:minio:sqs::1:amqp --event put
    mc event add ${tenant_name}/surveillance arn:minio:sqs::1:amqp --event put
    mc event add ${tenant_name}/tus arn:minio:sqs::1:amqp --event delete

    # Expire objects in the tus upload bucket after one day.
    mc ilm add --expiry-days "1" ${tenant_name}/tus

    echo ""
    echo "done of init !"
}

# install_docker_compose

#install_minio
#
#if [[ "$(docker inspect -f '{{.State.Running}}' install-minio1-1)" == "true" ]]; then
#    echo "minio is running now! start to init minio!"
#    init_minio
#fi

init_minio
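A rough post-install check (a sketch; it assumes the "outside" alias configured above and the same mc build the script downloads):

mc admin info outside                                # server should report online
mc ls outside                                        # buckets created by init_minio
mc event ls outside/mission arn:minio:sqs::1:amqp    # put notification registered
mc ilm ls outside/tus                                # 1-day expiry rule on the tus bucket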
998-常用脚本/a-部署脚本/nginx暴露/在线安装nginx-centos.sh (new file, 26 lines)
@@ -0,0 +1,26 @@
#!/bin/bash

# Install nginx on CentOS from the official nginx.org repository.
sudo yum install -y yum-utils

cat >/etc/yum.repos.d/nginx.repo<<EOF
[nginx-stable]
name=nginx stable repo
baseurl=http://nginx.org/packages/centos/8/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true

[nginx-mainline]
name=nginx mainline repo
baseurl=http://nginx.org/packages/mainline/centos/8/x86_64/
gpgcheck=1
enabled=0
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
EOF

yum-config-manager --enable nginx-mainline

yum install -y nginx
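A quick sanity check after the install (nothing assumed beyond the repo file written above):

yum repolist enabled | grep nginx    # nginx-mainline should be listed
nginx -v                             # version should come from nginx.org, not the distro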
998-常用脚本/a-部署脚本/nginx暴露/在线安装nginx.sh (new file, 19 lines)
@@ -0,0 +1,19 @@
#!/bin/bash

# Install nginx on Ubuntu from the official nginx.org APT repository.
sudo apt install -y curl gnupg2 ca-certificates lsb-release ubuntu-keyring

curl https://nginx.org/keys/nginx_signing.key | gpg --dearmor \
    | sudo tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null

# Print the imported key fingerprint for a manual check.
gpg --dry-run --quiet --no-keyring --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg

echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \
http://nginx.org/packages/ubuntu `lsb_release -cs` nginx" \
    | sudo tee /etc/apt/sources.list.d/nginx.list

# Pin nginx.org packages above the distribution packages.
echo -e "Package: *\nPin: origin nginx.org\nPin: release o=nginx\nPin-Priority: 900\n" \
    | sudo tee /etc/apt/preferences.d/99nginx

sudo apt update
sudo apt install -y nginx
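To confirm APT will take nginx from nginx.org rather than the Ubuntu archive (relies only on the pin written above):

apt-cache policy nginx    # candidate should come from nginx.org with priority 900
nginx -v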
998-常用脚本/a-部署脚本/nginx暴露/真实nginx-offline-map.conf (new file, 32 lines)
@@ -0,0 +1,32 @@
server {
    listen 8889;
    server_name localhost;
    # Origins allowed for cross-origin requests; * means all
    add_header 'Access-Control-Allow-Origin' *;
    # Allow requests to carry cookies
    add_header 'Access-Control-Allow-Credentials' 'true';
    # Allowed request methods, e.g. GET/POST/PUT/DELETE
    add_header 'Access-Control-Allow-Methods' *;
    # Allowed request headers
    add_header 'Access-Control-Allow-Headers' *;

    location /electronic {
        root /root/offline_map/;
        autoindex on;
        add_header Access-Control-Allow-Origin *;
        add_header Access-Control-Allow-Methods 'GET,POST';
        add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
    }

    location /satellite {
        root /root/offline_map/;
        autoindex on;
        add_header Access-Control-Allow-Origin *;
        add_header Access-Control-Allow-Methods 'GET,POST';
        add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
    }

    # http://192.168.6.6:8889/electronic/{z}/{x}/{y}.png
    # http://192.168.6.6:8889/satellite/{z}/{x}/{y}.png
    # /root/offline_map/satellite /root/offline_map/electronic
}
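After dropping this file into /etc/nginx/conf.d/, a quick smoke test could look like the following (assumes tiles already exist under /root/offline_map; the z/x/y values are arbitrary examples):

nginx -t && nginx -s reload
curl -I http://127.0.0.1:8889/electronic/3/6/3.png    # expect 200 if that tile file exists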
998-常用脚本/a-部署脚本/nginx暴露/真实nginx-reverse-proxy.conf (new file, 50 lines)
@@ -0,0 +1,50 @@
upstream proxy_server {
    ip_hash;
    server 172.16.100.55:30500;
    server 172.16.100.59:30500;
    server 172.16.100.60:30500;
}

server {
    listen 8088;
    server_name localhost;
    location / {
        proxy_pass http://proxy_server;
        client_max_body_size 5120m;
        client_body_buffer_size 5120m;
        client_body_timeout 6000s;
        proxy_send_timeout 10000s;
        proxy_read_timeout 10000s;
        proxy_connect_timeout 600s;
        proxy_max_temp_file_size 5120m;
        proxy_request_buffering on;
        proxy_buffering off;
        proxy_buffer_size 4k;
        proxy_buffers 4 12k;
        proxy_set_header Host fake-domain.eedsjc-uavms.io;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    location /_AMapService/v4/map/styles {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://webapi.amap.com/v4/map/styles;
    }

    location /_AMapService/ {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://restapi.amap.com/;
    }

    location /rtc/v1/ {
        add_header Access-Control-Allow-Headers X-Requested-With;
        add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
        proxy_pass http://127.0.0.1:30985/rtc/v1/;
    }

    # Block Spring Boot actuator endpoints from outside.
    location ~ ^/\w*/actuator/ {
        return 403;
    }
}
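A minimal way to exercise the proxy once this server block is live (the proxy itself injects the hard-coded Host header, so a plain request is enough; the path is only an example):

nginx -t && nginx -s reload
curl -Is http://127.0.0.1:8088/ | head -n 1    # should answer via the 30500 upstreams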
998-常用脚本/a-部署脚本/nginx暴露/真实的nginx配置.conf (new file, 44 lines)
@@ -0,0 +1,44 @@
user root;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    use epoll;
    worker_connections 65535;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    server_tokens off;
    sendfile on;
    send_timeout 1200;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 600;
    types_hash_max_size 2048;

    client_max_body_size 2048m;
    client_body_buffer_size 2048m;
    underscores_in_headers on;

    proxy_send_timeout 600;
    proxy_read_timeout 600;
    proxy_connect_timeout 600;
    proxy_buffer_size 128k;
    proxy_buffers 8 256k;

    include /etc/nginx/conf.d/*.conf;
}

stream {
    include /etc/nginx/conf.d/stream/*.conf;
}
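The stream block pulls TCP/UDP proxy snippets from /etc/nginx/conf.d/stream/; before reloading with this as /etc/nginx/nginx.conf it is worth creating that directory and validating (a small sketch):

mkdir -p /etc/nginx/conf.d/stream
nginx -t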
998-常用脚本/a-部署脚本/nginx暴露/纯离线部署nginx-docker-compose.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
version: '3'

services:
  cmii-nginx:
    image: 172.16.100.55:8033/cmii/nginx:1.27.0
    volumes:
      - /etc/nginx/conf.d:/etc/nginx/conf.d
      - /etc/nginx/nginx.conf:/etc/nginx/nginx.conf
      - /root/offline_map:/root/offline_map
    ports:
      - "8088:8088"
      - "8089:8089"
    restart: always


# mkdir -p /etc/nginx/conf.d
# touch /etc/nginx/nginx.conf
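Assuming the host paths in the volumes section already exist (the commented mkdir/touch lines above), bringing the container up and checking it might look like:

docker-compose -f 纯离线部署nginx-docker-compose.yaml up -d
docker-compose -f 纯离线部署nginx-docker-compose.yaml ps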
998-常用脚本/a-部署脚本/nginx暴露/设置ingress-nginx.sh (new file, 45 lines)
@@ -0,0 +1,45 @@
#!/bin/bash

modify_ingress_nginx_host_network(){
    echo "start to modify hostnetwork to false !"
    kubectl patch daemonset nginx-ingress-controller -n ingress-nginx --patch '{"spec":{"template":{"spec":{"hostNetwork": false}}}}'
    echo ""
    kubectl get daemonset -n ingress-nginx nginx-ingress-controller -o jsonpath='{.spec.template.spec.hostNetwork}'
}

build_for_ingress_nginx_node_port(){
    echo "start to write ingress nginx node port service !"
    mkdir -p /root/wdd/install/
    # Overwrite (not append), so re-running the script does not duplicate the manifest.
    cat >/root/wdd/install/k8s-ingress-nginx.yaml<<EOF
kind: Service
apiVersion: v1
metadata:
  name: ingress-nginx-service
  namespace: ingress-nginx
spec:
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: 80
      nodePort: 30500
    - name: https
      protocol: TCP
      port: 443
      targetPort: 443
      nodePort: 31500
  selector:
    app: ingress-nginx
  type: NodePort
  sessionAffinity: None
EOF
    echo ""
    kubectl apply -f /root/wdd/install/k8s-ingress-nginx.yaml
}

modify_ingress_nginx_host_network

build_for_ingress_nginx_node_port
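A quick check that the NodePort service is in place and reachable (assumes at least one node runs the ingress controller; the node address placeholder is illustrative):

kubectl -n ingress-nginx get svc ingress-nginx-service -o wide
curl -I http://<node-ip>:30500/    # <node-ip> is any cluster node address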
998-常用脚本/a-部署脚本/z_执行apply命令.sh (new file, 68 lines)
@@ -0,0 +1,68 @@
#!/bin/bash

# Command cheat sheet: apply/delete pairs for each manifest, not meant to run top to bottom.

mkdir -p /root/.kube
cp kube_config_cluster.yml /root/.kube/config

kubectl apply -f k8s-dashboard.yaml
kubectl delete -f k8s-dashboard.yaml

kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')

kubectl apply -f k8s-nfs.yaml
kubectl delete -f k8s-nfs.yaml

kubectl -n kube-system describe pod $(kubectl -n kube-system get pods | grep nfs-client-provisioner | awk '{print$1}')

kubectl apply -f k8s-nfs-test.yaml
kubectl delete -f k8s-nfs-test.yaml

cd /var/lib/docker/nfs_data

kubectl apply -f k8s-pvc.yaml
kubectl delete -f k8s-pvc.yaml

kubectl apply -f k8s-mongo.yaml
kubectl delete -f k8s-mongo.yaml

vim k8s-emqx.yaml
kubectl apply -f k8s-emqx.yaml
kubectl delete -f k8s-emqx.yaml

kubectl apply -f k8s-rabbitmq.yaml
kubectl delete -f k8s-rabbitmq.yaml

kubectl apply -f k8s-redis.yaml
kubectl delete -f k8s-redis.yaml

kubectl apply -f k8s-mysql.yaml
kubectl delete -f k8s-mysql.yaml

# ----

kubectl apply -f k8s-nacos.yaml
kubectl delete -f k8s-nacos.yaml

# ---

vim k8s-configmap.yaml
kubectl apply -f k8s-configmap.yaml
kubectl delete -f k8s-configmap.yaml

vi k8s-ingress.yaml
kubectl apply -f k8s-ingress.yaml
kubectl delete -f k8s-ingress.yaml

vi k8s-frontend.yaml
kubectl apply -f k8s-frontend.yaml
kubectl delete -f k8s-frontend.yaml

vi k8s-backend.yaml
kubectl apply -f k8s-backend.yaml
kubectl delete -f k8s-backend.yaml


kubectl -n kube-system get pods -o jsonpath='{.items[*].metadata.name}'

vi k8s-srs.yaml
kubectl apply -f k8s-srs.yaml
kubectl delete -f k8s-srs.yaml
998-常用脚本/a-部署脚本/为node打标签.sh (new file, 16 lines)
@@ -0,0 +1,16 @@
#!/bin/bash

kubectl get nodes --show-labels

kubectl label nodes 172.31.2.7 uavcloud.env=szgz --overwrite
kubectl label nodes 172.31.2.8 uavcloud.env=szgz --overwrite
kubectl label nodes 172.31.2.9 uavcloud.env=szgz --overwrite
kubectl label nodes 192.168.233.100 uavcloud.affinity=common --overwrite


# Remove a label
kubectl label nodes 192.168.40.193 uavcloud.env-

# Get nodes with a specific label
kubectl get nodes -l uavcloud.affinity=common
998-常用脚本/a-部署脚本/开机启动的脚本.txt (new file, 46 lines)
@@ -0,0 +1,46 @@
vim /etc/systemd/system/cmii-startup.service


[Unit]
Description=Cmii Start Up Script

[Service]
ExecStart=/bin/bash /cmii/start-up.sh
User=root
Group=root

[Install]
WantedBy=multi-user.target



vim /cmii/start-up.sh

docker-compose -f /cmii/harbor/docker-compose.yml up -d
sleep 10

docker-compose -f /cmii/0-minio-dockercompose.yml up -d

rm -rf /nfsdata/zhbf-helm-emqxs-pvc-fdb605a0-5120-481a-bdd5-7ef1213c2363/

sleep 5

kubectl delete -n zhbf pod helm-nacos-0 --force
kubectl delete -n zhbf pod helm-emqxs-0 --force
kubectl delete -n zhbf pod helm-redis-master-0 --force
kubectl delete -n zhbf pod helm-redis-replicas-0 --force

sleep 30

for kindof in pods
do
  kubectl -n zhbf delete $kindof $(kubectl -n zhbf get $kindof | grep "cmii"| awk '{print$1}')
done


chmod +x /cmii/start-up.sh


systemctl daemon-reload
sudo systemctl enable cmii-startup.service
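To confirm the unit will actually run at boot (relies only on the unit file written above):

systemctl is-enabled cmii-startup.service    # expect "enabled"
systemctl status cmii-startup.service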
998-常用脚本/a-部署脚本/手动创建harbor仓库.sh (new file, 7 lines)
@@ -0,0 +1,7 @@
#!/bin/bash

export harbor_host=172.16.100.55:8033

# Create the public "cmii" and "rancher" projects through the Harbor v2 API.
curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"cmii","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$harbor_host/api/v2.0/projects

curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"rancher","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$harbor_host/api/v2.0/projects
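The same API can confirm the projects exist (a sketch using the credentials already hard-coded above):

curl -s -u "admin:V2ryStr@ngPss" "http://$harbor_host/api/v2.0/projects?name=cmii"
curl -s -u "admin:V2ryStr@ngPss" "http://$harbor_host/api/v2.0/projects?name=rancher"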
998-常用脚本/a-部署脚本/清理rke集群的安装.sh (new file, 64 lines)
@@ -0,0 +1,64 @@
#!/bin/bash

all_ip_list=(172.18.10.8 172.18.10.239 172.18.10.231 172.18.10.198)

clean_octopus_agent() {
    # Originally marked "does not work": the $(...) substitutions ran on the local host
    # because of the double quotes. Single quotes defer them to the remote host, and
    # "docker prune -y" is replaced by the valid "docker system prune -f".
    local server
    for server in "${all_ip_list[@]}"; do
        echo "the current server is ${server}"
        ssh root@"${server}" "echo yes"
        ssh root@"${server}" 'docker container stop $(docker ps -aq); docker system prune -f'
        ssh root@"${server}" 'for mount in $(mount | grep tmpfs | grep "/var/lib/kubelet" | awk "{ print \$3 }") /var/lib/kubelet /var/lib/rancher; do umount $mount; done'
        ssh root@"${server}" "rm -rf /etc/ceph /etc/cni /etc/kubernetes /etc/rancher /opt/cni /opt/rke /run/secrets/kubernetes.io /run/calico /run/flannel /var/lib/calico /var/lib/etcd /var/lib/cni /var/lib/kubelet /var/lib/rancher /var/log/containers /var/log/kube-audit /var/log/pods /var/run/calico"
        ssh root@"${server}" "ip link delete flannel.1"
        ssh root@"${server}" "ip link delete cni0"
        ssh root@"${server}" "ip link delete tunl0"
        ssh root@"${server}" "rmmod ipip"
        ssh root@"${server}" "iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -t raw -F"
    done
}

clean_rke_cluster() {

    k8s_component=(kubelet kube-proxy kube-apiserver kube-controller-manager kube-scheduler etcd)
    for component in ${k8s_component[@]}; do docker container stop $component && docker container rm $component; done

    for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do umount $mount; done

    rm -rf /etc/ceph \
        /etc/cni \
        /etc/kubernetes \
        /etc/rancher \
        /opt/cni \
        /opt/rke \
        /run/secrets/kubernetes.io \
        /run/calico \
        /run/flannel \
        /var/lib/calico \
        /var/lib/etcd \
        /var/lib/cni \
        /var/lib/kubelet \
        /var/lib/rancher /var/log/containers \
        /var/log/kube-audit \
        /var/log/pods \
        /var/run/calico

    ip link delete flannel.1
    ip link delete cni0
    ip link delete tunl0
    rmmod ipip

    iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -t raw -F
    ip6tables -F && ip6tables -t nat -F && ip6tables -t mangle -F && ip6tables -t raw -F

    rke remove --force
    docker container prune -f
    rke -d up
}


clean_rke_cluster
998-常用脚本/a-部署脚本/编辑calico状态.sh (new file, 20 lines)
@@ -0,0 +1,20 @@
#!/bin/bash


# Change the interface/IP that calico-node detects
kubectl -n kube-system edit daemonset calico-node
# in the editor, under env:
#   - name: FELIX_INTERFACEPREFIX
#     value: "eth0"

# More reliable: set the autodetection method directly
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=ens18


# Delete all calico pods so they restart with the new setting
kubectl delete pods --namespace=kube-system -l k8s-app=calico-node


node_name=192.168.40.74
kubectl cordon ${node_name}
kubectl drain --ignore-daemonsets --delete-emptydir-data ${node_name}
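After the pods restart, one way to confirm calico picked the intended interface (a sketch; the node name is just the example used above):

kubectl -n kube-system get pods -l k8s-app=calico-node -o wide
kubectl get node 192.168.40.74 -o jsonpath='{.metadata.annotations.projectcalico\.org/IPv4Address}'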