zeaslity
2024-10-30 16:30:51 +08:00
commit 437acbeb63
3363 changed files with 653948 additions and 0 deletions

View File

@@ -0,0 +1,16 @@
#!/bin/bash
# With internet access:
bash <(curl -sL http://42.192.52.227:9000/octopus/init-script-wdd.sh) --url http://42.192.52.227:9000/octopus --agent-install --offline
# Without internet access (use the offline MinIO mirror):
export offline_minio=10.250.0.100
bash <(curl -sL http://${offline_minio}:9000/octopus/init-script-wdd.sh) --url http://${offline_minio}:9000/octopus --help
bash <(curl -sL http://${offline_minio}:9000/octopus/init-script-wdd.sh) --url http://${offline_minio}:9000/octopus --zsh --tools --cn
bash <(curl -sL http://${offline_minio}:9000/octopus/init-script-wdd.sh) --url http://${offline_minio}:9000/octopus --agent-install --offline
bash <(curl -sL http://${offline_minio}:9000/octopus/init-script-wdd.sh) --url http://${offline_minio}:9000/octopus --agent-update --offline
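# A safer variant (sketch, assumption: same endpoints as above): download the
# installer to a file for review instead of piping curl straight into bash.
curl -fsSL "http://${offline_minio}:9000/octopus/init-script-wdd.sh" -o /tmp/init-script-wdd.sh
less /tmp/init-script-wdd.sh   # inspect before executing
bash /tmp/init-script-wdd.sh --url "http://${offline_minio}:9000/octopus" --agent-install --offline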

View File

@@ -0,0 +1,8 @@
#!/bin/bash
umount /var/lib/docker
vgremove datavg
pvremove /dev/vdb1
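# A guarded teardown (sketch, assuming the same device and VG names): check that
# each object exists before removing it, so reruns do not error out.
if mountpoint -q /var/lib/docker; then
umount /var/lib/docker
fi
vgs datavg >/dev/null 2>&1 && vgremove -y datavg
pvs /dev/vdb1 >/dev/null 2>&1 && pvremove /dev/vdb1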

View File

@@ -0,0 +1,96 @@
#!/bin/bash
# Disable swap
swapoff -a
cp -f /etc/fstab /etc/fstab_bak
grep -v swap /etc/fstab_bak > /etc/fstab
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeBefore=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "root volume size before resize: ${RootVolumeSizeBefore}"
# echo "y
# " | lvremove /dev/mapper/centos-swap
# freepesize=$(vgdisplay centos | grep 'Free PE' | awk '{print $5}')
# lvextend -l+${freepesize} /dev/mapper/centos-root
# # Auto-grow the XFS filesystem to the maximum available size
# xfs_growfs /dev/mapper/centos-root
# df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}'
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeAfter=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "root volume size after resize: ${RootVolumeSizeAfter}"
# RootVolumeSizeBeforeNum=$(echo $RootVolumeSizeBefore | cut -d "G" -f1)
# RootVolumeSizeAfterNum=$(echo $RootVolumeSizeAfter | cut -d "G" -f1)
# echo "root volume grew by $(( ${RootVolumeSizeAfterNum}-${RootVolumeSizeBeforeNum} ))GB"
yum install lvm2 -y
echo ""
echo ""
echo ""
echo "-----------------------------------------------------------------------"
export VG_NAME=datavg
echo "n
p
t
8e
w
" | fdisk /dev/sdb
partprobe
# If a volume group already exists, extend it instead:
# vgextend /dev/mapper/centos /dev/vda3
vgcreate ${VG_NAME} /dev/sdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# Adjust the size to match the actual environment
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
#mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
mkfs.ext4 /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
mkdir -p /var/lib/docker
#selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
# Note: the LV above is formatted ext4, so the fstab entry must say ext4.
export selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker ext4 defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"
# To grow the root filesystem, find the target (${VG_NAME}-root) via df -Th
# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
# xfs_growfs /dev/mapper/${VG_NAME}-root
# Site-specific: create a new LVM partition on /dev/vda (lvm2 installed above).
# Blank lines again accept the fdisk defaults; the empty line after t selects
# the partition just created.
echo "n
p



t

8e
w
" | fdisk /dev/vda
partprobe
vgextend klas_host-10-190-202-141 /dev/vda4
lvextend -l +100%FREE /dev/mapper/klas_host--10--190--202--141-root
partprobe
xfs_growfs /dev/mapper/klas_host--10--190--202--141-root
df -TH
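# Quick post-run verification (sketch): confirm the LVM layout and that both
# the data LV and the grown root filesystem look right.
lsblk -f
vgs
lvs
findmnt /var/lib/docker
df -TH / /var/lib/docker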

View File

@@ -0,0 +1,10 @@
#!/bin/bash
sudo mkfs.ext4 /dev/vdb
sudo mkdir -p /var/lib/docker
sudo mount /dev/vdb /var/lib/docker
echo '/dev/vdb /var/lib/docker ext4 defaults 0 2' | sudo tee -a /etc/fstab
sudo mount -a
df -h
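# A more durable fstab entry (sketch): reference the filesystem by UUID so the
# mount survives device renames (e.g. vdb becoming vdc after hardware changes).
uuid=$(sudo blkid -s UUID -o value /dev/vdb)
echo "UUID=${uuid} /var/lib/docker ext4 defaults 0 2" | sudo tee -a /etc/fstab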

View File

@@ -0,0 +1,46 @@
#!/bin/bash
# Replace the namespace,
# the actual MinIO address and port,
# and the actual RabbitMQ address and port (which must be exposed/reachable).
curl https://dl.min.io/client/mc/release/linux-amd64/mc -o /usr/local/bin/mc
chmod +x /usr/local/bin/mc
export tenant_name=uavcloud-dev
mc alias set ${tenant_name} https://minio.ig-dev.uavcmlc.com cmii B#923fC7mk
mc mb ${tenant_name}/jadenq ${tenant_name}/tus ${tenant_name}/thumbnail ${tenant_name}/pub-cms ${tenant_name}/live-srs-hls/ ${tenant_name}/mission/ ${tenant_name}/surveillance ${tenant_name}/playback ${tenant_name}/tower ${tenant_name}/modelprocess ${tenant_name}/srs-hls ${tenant_name}/live-cluster-hls
# mc alias set demo https://oss.demo.uavcmlc.com:18000 cmii B#923fC7mk
#
# mc cp -r demo/jadenq/scenariomock/xg/ ${tenant_name}/jadenq/scenariomock/xg/
# mc cp -r demo/jadenq/application/file/中移凌云使用手册.pdf ${tenant_name}/jadenq/application/file/中移凌云使用手册.pdf
# mc cp -r demo/jadenq/defimage/def.jpg ${tenant_name}/jadenq/defimage/def.jpg
# mc cp -r demo/pub-cms/application/img/ ${tenant_name}/pub-cms/application/img/
mc admin config set ${tenant_name} notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@10.250.0.200:35672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"
sleep 5
mc admin service restart ${tenant_name}
mc event add ${tenant_name}/mission arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/modelprocess arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/live-srs-hls arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/playback arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/live-cluster-hls arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/surveillance arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/tus arn:minio:sqs::1:amqp --event delete
mc ilm add --expiry-days "1" ${tenant_name}/tus
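# Verification sketch (subcommand names as in the 2022-era mc client used in
# this repo; newer releases rename some, e.g. "mc ilm rule ls"):
mc admin config get ${tenant_name} notify_amqp:1   # AMQP target registered?
mc event list ${tenant_name}/mission               # put events wired up?
mc ilm ls ${tenant_name}/tus                       # 1-day expiry rule present?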

View File

@@ -0,0 +1,23 @@
#!/bin/bash
nfs_data_path="/var/lib/docker/nfs_data"
#nfs_data_path="/data/nfs_data"
deploy_nfs_server(){
mkdir -p $nfs_data_path
chmod 777 $nfs_data_path
echo "${nfs_data_path} *(rw,no_root_squash,no_all_squash,sync)" >> /etc/exports
systemctl restart rpcbind
systemctl restart nfs-server
systemctl enable rpcbind
systemctl enable nfs-server
}
deploy_nfs_server
# docker login -u admin -p V2ryStr@ngPss 10.100.2.121:8033
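# Client-side check (sketch; <nfs-server-ip> is a placeholder for this host's IP):
showmount -e <nfs-server-ip>   # should list ${nfs_data_path}
mkdir -p /mnt/nfs_test
mount -t nfs <nfs-server-ip>:/var/lib/docker/nfs_data /mnt/nfs_test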

View File

@@ -0,0 +1,111 @@
#!/bin/bash
minio_local_path=/var/lib/docker/minio-pv/pv1
harbor_host=10.20.1.135:8033
inner_master_ip=10.20.1.135
minio_host_ip=10.20.1.139
install_minio(){
echo "start to create minio local path !"
mkdir -p ${minio_local_path}
chmod -R 777 ${minio_local_path}
mkdir -p /root/wdd/install/
cat > /root/wdd/install/minio-docker-compose.yaml <<EOF
version: '2'
services:
  minio1:
    ports:
      - "9000:9000"
      - "9001:9001"
    image: '${harbor_host}/cmii/minio:2022.5.4'
    environment:
      - MINIO_ROOT_USER=cmii
      - MINIO_ROOT_PASSWORD=B#923fC7mk
    restart: always
    volumes:
      - ${minio_local_path}:/data
EOF
echo "start minio container !"
docker-compose -f /root/wdd/install/minio-docker-compose.yaml up -d
echo ""
}
install_docker_compose(){
curl https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/docker-compose-amd64 -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
}
init_minio(){
echo "start to download mc!"
if [[ ! -f /usr/local/bin/mc ]]; then
curl https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/mc -o /usr/local/bin/mc
chmod +x /usr/local/bin/mc
fi
# curl https://dl.min.io/client/mc/release/linux-amd64/mc -o /usr/local/bin/mc
echo ""
sleep 5
export tenant_name=outside
mc alias set ${tenant_name} http://${minio_host_ip}:9000 cmii B#923fC7mk
mc mb ${tenant_name}/jadenq ${tenant_name}/tus ${tenant_name}/thumbnail ${tenant_name}/pub-cms ${tenant_name}/live-srs-hls/ ${tenant_name}/mission/ ${tenant_name}/surveillance ${tenant_name}/playback ${tenant_name}/tower ${tenant_name}/modelprocess ${tenant_name}/srs-hls ${tenant_name}/live-cluster-hls ${tenant_name}/geodata
echo ""
echo "set rabbit mq"
mc admin config set ${tenant_name} notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@${inner_master_ip}:35672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"
echo ""
echo "sleep 5 s!"
sleep 5
mc admin service restart ${tenant_name}
echo "sleep 5 s!"
sleep 5
echo ""
echo "start to add event notification !"
mc event add ${tenant_name}/mission arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/modelprocess arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/live-srs-hls arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/playback arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/live-cluster-hls arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/geodata arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/surveillance arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/tus arn:minio:sqs::1:amqp --event delete
mc ilm add --expiry-days "1" ${tenant_name}/tus
echo ""
echo "done of init !"
}
#install_docker_compose
install_minio
# Note: string comparison ("=="), not -eq; docker inspect prints "true"/"false",
# and an arithmetic test on those strings would always succeed.
if [[ "$(docker inspect -f '{{.State.Running}}' install-minio1-1)" == "true" ]]; then
echo "minio is running now! start to init minio!"
init_minio
fi
#init_minio
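# A polling alternative to the fixed sleeps above (sketch): wait up to ~60 s
# for the container to report Running before calling init_minio.
wait_for_minio(){
local i
for i in $(seq 1 30); do
[[ "$(docker inspect -f '{{.State.Running}}' install-minio1-1 2>/dev/null)" == "true" ]] && return 0
sleep 2
done
return 1
}
# e.g.: wait_for_minio && init_minio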

View File

@@ -0,0 +1,26 @@
#!/bin/bash
sudo yum install -y yum-utils
cat >/etc/yum.repos.d/nginx.repo<<EOF
[nginx-stable]
name=nginx stable repo
baseurl=http://nginx.org/packages/centos/8/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
[nginx-mainline]
name=nginx mainline repo
baseurl=http://nginx.org/packages/mainline/centos/8/x86_64/
gpgcheck=1
enabled=0
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
EOF
yum-config-manager --enable nginx-mainline
yum install -y nginx

View File

@@ -0,0 +1,19 @@
#!/bin/bash
sudo apt install -y curl gnupg2 ca-certificates lsb-release ubuntu-keyring
curl https://nginx.org/keys/nginx_signing.key | gpg --dearmor \
| sudo tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null
gpg --dry-run --quiet --no-keyring --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \
http://nginx.org/packages/ubuntu `lsb_release -cs` nginx" \
| sudo tee /etc/apt/sources.list.d/nginx.list
echo -e "Package: *\nPin: origin nginx.org\nPin: release o=nginx\nPin-Priority: 900\n" \
| sudo tee /etc/apt/preferences.d/99nginx
sudo apt update
sudo apt install -y nginx

View File

@@ -0,0 +1,32 @@
server {
    listen 8889;
    server_name localhost;
    # Domains allowed to make cross-origin requests; * means all
    add_header 'Access-Control-Allow-Origin' *;
    # Allow requests to carry cookies
    add_header 'Access-Control-Allow-Credentials' 'true';
    # Allowed request methods, e.g. GET/POST/PUT/DELETE
    add_header 'Access-Control-Allow-Methods' *;
    # Allowed request headers
    add_header 'Access-Control-Allow-Headers' *;

    location /electronic {
        root /root/offline_map/;
        autoindex on;
        add_header Access-Control-Allow-Origin *;
        add_header Access-Control-Allow-Methods 'GET,POST';
        add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
    }

    location /satellite {
        root /root/offline_map/;
        autoindex on;
        add_header Access-Control-Allow-Origin *;
        add_header Access-Control-Allow-Methods 'GET,POST';
        add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
    }

    # Tile URL patterns, served from /root/offline_map/electronic and
    # /root/offline_map/satellite:
    #   http://192.168.6.6:8889/electronic/{z}/{x}/{y}.png
    #   http://192.168.6.6:8889/satellite/{z}/{x}/{y}.png
}
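# Smoke test (sketch; the z/x/y values below are placeholders):
#   curl -sI http://192.168.6.6:8889/electronic/3/6/3.png   # expect HTTP 200
#   curl -sI http://192.168.6.6:8889/satellite/3/6/3.png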

View File

@@ -0,0 +1,43 @@
server {
    listen 8088;
    server_name localhost;

    location / {
        proxy_pass http://localhost:30500;
        client_max_body_size 5120m;
        client_body_buffer_size 5120m;
        client_body_timeout 6000s;
        proxy_send_timeout 10000s;
        proxy_read_timeout 10000s;
        proxy_connect_timeout 600s;
        proxy_max_temp_file_size 5120m;
        proxy_request_buffering on;
        proxy_buffering off;
        proxy_buffer_size 4k;
        proxy_buffers 4 12k;
        proxy_set_header Host fake-domain.jxejpt.io;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    location /_AMapService/v4/map/styles {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://webapi.amap.com/v4/map/styles;
    }

    location /_AMapService/ {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://restapi.amap.com/;
    }

    location /rtc/v1/ {
        add_header Access-Control-Allow-Headers X-Requested-With;
        add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
        proxy_pass http://127.0.0.1:30985/rtc/v1/;
    }

    location ~ ^/\w*/actuator/ {
        return 403;
    }
}
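# After changing this file, validate and reload (standard nginx workflow):
#   nginx -t && nginx -s reload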

View File

@@ -0,0 +1,44 @@
user root;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    use epoll;
    worker_connections 65535;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;
    server_tokens off;
    sendfile on;
    send_timeout 1200;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 600;
    types_hash_max_size 2048;
    client_max_body_size 2048m;
    client_body_buffer_size 2048m;
    underscores_in_headers on;
    proxy_send_timeout 600;
    proxy_read_timeout 600;
    proxy_connect_timeout 600;
    proxy_buffer_size 128k;
    proxy_buffers 8 256k;
    include /etc/nginx/conf.d/*.conf;
}

stream {
    include /etc/nginx/conf.d/stream/*.conf;
}

View File

@@ -0,0 +1,13 @@
version: '3'
services:
  cmii-nginx:
    image: 10.20.1.135:8033/cmii/nginx:1.21.3
    volumes:
      - /etc/nginx/conf.d:/etc/nginx/conf.d
      - /etc/nginx/nginx.conf:/etc/nginx/nginx.conf
      - /root/offline_map:/root/offline_map
    ports:
      - "8088:8088"
      - "8089:8089"
    restart: always

View File

@@ -0,0 +1,45 @@
#!/bin/bash
modify_ingress_nginx_host_network(){
echo "start to modify hostnetwork to false !"
kubectl patch daemonset nginx-ingress-controller -n ingress-nginx --patch '{"spec":{"template":{"spec":{"hostNetwork": false}}}}'
echo ""
kubectl get daemonset -n ingress-nginx nginx-ingress-controller -o jsonpath='{.spec.template.spec.hostNetwork}'
}
build_for_ingress_nginx_node_port(){
echo "start to write ingress nginx node port service !"
mkdir -p /root/wdd/install/
# Overwrite (>) rather than append, so reruns do not accumulate duplicate YAML.
cat >/root/wdd/install/k8s-ingress-nginx.yaml<<EOF
kind: Service
apiVersion: v1
metadata:
  name: ingress-nginx-service
  namespace: ingress-nginx
spec:
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: 80
      nodePort: 30500
    - name: https
      protocol: TCP
      port: 443
      targetPort: 443
      nodePort: 31500
  selector:
    app: ingress-nginx
  type: NodePort
  sessionAffinity: None
EOF
echo ""
kubectl apply -f /root/wdd/install/k8s-ingress-nginx.yaml
}
modify_ingress_nginx_host_network
build_for_ingress_nginx_node_port
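# Verification sketch (<node-ip> is a placeholder for any cluster node):
kubectl -n ingress-nginx get svc ingress-nginx-service
curl -sI http://<node-ip>:30500/ | head -n1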

View File

@@ -0,0 +1,33 @@
#!/bin/bash
need_to_import_sql_folder="/home/wdd/Documents/master_data_5.2.0"
target_host_ip=20.4.13.81
target_host_port=33306
target_user=root
target_passwd=QzfXQhd3bQ
full_back_up_database(){
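# Placeholder: a real pre-import full backup is not implemented yet.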
echo "yes"
}
batch_import_sql_struct_to_server(){
for sql_file in $(ls ${need_to_import_sql_folder} | sort -n); do
if [ -d $need_to_import_sql_folder/${sql_file} ];then
continue
fi
echo "current file is $need_to_import_sql_folder/${sql_file}"
/root/wdd/mysql/bin/mysql -u${target_user} -p${target_passwd} -h${target_host_ip} -P${target_host_port} <"$need_to_import_sql_folder/${sql_file}"
echo "------------------"
echo ""
done
}
batch_import_sql_struct_to_server
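# Pre-flight check (sketch): confirm the server is reachable with these
# credentials before looping over the dump files.
/root/wdd/mysql/bin/mysql -u${target_user} -p${target_passwd} -h${target_host_ip} -P${target_host_port} -e "SELECT VERSION();"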

View File

@@ -0,0 +1,38 @@
#!/bin/bash
minio_inner_ip_host=10.129.80.223:9000
download_ts2mp4_file(){
echo ""
wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/ts2mp4_docker_image_v1.tar.gz
echo ""
echo ""
echo ""
wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/pack_ts2mp4_release-0521.tar.gz
}
bootup_ts2mp4(){
if [[ -f ts2mp4_docker_image_v1.tar.gz ]]; then
echo "start to load ts2mp4 image file !"
docker load -i ts2mp4_docker_image_v1.tar.gz
echo ""
echo "init ts2mp4 config!"
# Extract the archive actually downloaded above (the x64-0724 name was stale).
tar -zvxf pack_ts2mp4_release-0521.tar.gz
echo "start to modify!"
sed -i "s/https:\/\/minio.ig-uat.uavcmlc.com:31500/http:\/\/$minio_inner_ip_host/g" $(pwd)/pack_ts2mp4_release/server_config_docker.ini
bash $(pwd)/pack_ts2mp4_release/run_docker.sh
sleep 3
if docker ps | grep -q ts2mp4; then
echo "ts2mp4 started successful !"
else
echo "ts2mp4 FAILED!"
fi
fi
}
#download_ts2mp4_file
bootup_ts2mp4

View File

@@ -0,0 +1,46 @@
vim /etc/systemd/system/cmii-startup.service
[Unit]
Description=Cmii Start Up Script
[Service]
ExecStart=/bin/bash /cmii/start-up.sh
User=root
Group=root
[Install]
WantedBy=multi-user.target
vim /cmii/start-up.sh
docker-compose -f /cmii/harbor/docker-compose.yml up -d
sleep 10
docker-compose -f /cmii/0-minio-dockercompose.yml up -d
rm -rf /nfsdata/zhbf-helm-emqxs-pvc-fdb605a0-5120-481a-bdd5-7ef1213c2363/
sleep 5
kubectl delete -n zhbf pod helm-nacos-0 --force
kubectl delete -n zhbf pod helm-emqxs-0 --force
kubectl delete -n zhbf pod helm-redis-master-0 --force
kubectl delete -n zhbf pod helm-redis-replicas-0 --force
sleep 30
for kindof in pods
do
kubectl -n zhbf delete $kindof $(kubectl -n zhbf get $kindof | grep "cmii"| awk '{print$1}')
done
chmod +x /cmii/start-up.sh
systemctl daemon-reload
sudo systemctl enable cmii-startup.service
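# Verify the unit is registered and enabled (sketch):
systemctl is-enabled cmii-startup.service
systemctl status cmii-startup.service --no-pager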

View File

@@ -0,0 +1,7 @@
export harbor_host=10.129.80.218:8033
curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"cmii","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$harbor_host/api/v2.0/projects
curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"rancher","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$harbor_host/api/v2.0/projects
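# Verification sketch (same credentials as above; Harbor's GET /projects
# endpoint supports filtering by name):
curl -s -u "admin:V2ryStr@ngPss" "http://$harbor_host/api/v2.0/projects?name=cmii"
curl -s -u "admin:V2ryStr@ngPss" "http://$harbor_host/api/v2.0/projects?name=rancher"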

View File

@@ -0,0 +1,57 @@
#!/bin/bash
all_ip_list=(172.18.10.8 172.18.10.239 172.18.10.231 172.18.10.198)
clean_octopus_agent() {
# Originally marked "cannot run": unescaped $(...)/$mount expanded on the local
# host, and "docker prune -y" is not a valid command. Escaping with \$ defers
# expansion to the remote server.
local server
for server in "${all_ip_list[@]}"; do
echo "the current server is ${server}"
ssh root@"${server}" "echo yes"
ssh root@"${server}" "docker container stop \$(docker ps -aq) && docker system prune -af"
ssh root@"${server}" "for m in \$(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print \$3 }') /var/lib/kubelet /var/lib/rancher; do umount \$m; done"
ssh root@"${server}" "rm -rf /etc/ceph /etc/cni /etc/kubernetes /etc/rancher /opt/cni /opt/rke /run/secrets/kubernetes.io /run/calico /run/flannel /var/lib/calico /var/lib/etcd /var/lib/cni /var/lib/kubelet /var/lib/rancher /var/log/containers /var/log/kube-audit /var/log/pods /var/run/calico"
ssh root@"${server}" "ip link delete flannel.1"
ssh root@"${server}" "ip link delete cni0"
ssh root@"${server}" "ip link delete tunl0"
ssh root@"${server}" "rmmod ipip"
ssh root@"${server}" "iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -t raw -F"
done
}
clean_rke_cluster() {
k8s_components=(kubelet kube-proxy kube-apiserver kube-controller-manager kube-scheduler etcd)
for component in "${k8s_components[@]}"; do docker container stop $component && docker container rm $component; done
for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do umount $mount; done
rm -rf /etc/ceph \
/etc/cni \
/etc/kubernetes \
/etc/rancher \
/opt/cni \
/opt/rke \
/run/secrets/kubernetes.io \
/run/calico \
/run/flannel \
/var/lib/calico \
/var/lib/etcd \
/var/lib/cni \
/var/lib/kubelet \
/var/lib/rancher /var/log/containers \
/var/log/kube-audit \
/var/log/pods \
/var/run/calico
ip link delete flannel.1
ip link delete cni0
ip link delete tunl0
rmmod ipip
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -t raw -F
}
clean_rke_cluster

View File

@@ -0,0 +1,15 @@
#!/bin/bash
# Change the interface calico-node uses for IP autodetection
kubectl -n kube-system edit daemonset calico-node
# In the editor, add/adjust the env entry, e.g.:
#   env:
#     - name: FELIX_INTERFACEPREFIX
#       value: "eth0"
# More reliable: set the autodetection method directly
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=eth0
# Delete all calico pods so they restart with the new setting
kubectl delete pods --namespace=kube-system -l k8s-app=calico-node
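# Post-change check (sketch): the calico-node pods should be recreated and Ready.
kubectl -n kube-system get pods -l k8s-app=calico-node -o wide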