52
998-常用脚本/AI安装/install_nvidia_docker.sh
Normal file
@@ -0,0 +1,52 @@
#!/bin/bash

download_all_dependency(){
    mkdir -p /root/wdd/install/ai-run/
    cd /root/wdd/install/ai-run/

    wget https://oss.demo.uavcmlc.com/cmlc-installation/ai/nvidia-docker-toolkit-ubuntu.zip
    wget https://oss.demo.uavcmlc.com/cmlc-installation/ai/cmlc-ai-operator_v5.4.0-v100-all.tar.gz
    wget https://oss.demo.uavcmlc.com/cmlc-installation/ai/config.yaml
    wget https://oss.demo.uavcmlc.com/cmlc-installation/ai/drone_detect_model_20240903.zip
}

install_nvidia_docker_runtime(){
    unzip nvidia-docker-toolkit-ubuntu.zip
    cd dev_files

    dpkg -i libnvidia-container1_1.12.0-1_amd64.deb
    dpkg -i libnvidia-container-tools_1.12.0-1_amd64.deb
    dpkg -i nvidia-container-toolkit-base_1.12.0-1_amd64.deb
    dpkg -i nvidia-container-toolkit_1.12.0-1_amd64.deb
    dpkg -i nvidia-container-runtime_3.12.0-1_all.deb
    dpkg -i nvidia-docker2_2.12.0-1_all.deb

    systemctl restart docker
}

start_ai_process(){
    docker stop cmlc-ai && docker rm cmlc-ai
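
    # Note: the two docker run variants below are alternatives for different GPU
    # hosts; keep only the one that matches the machine and comment out the other.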
    # a100 model
    docker run -itd -p2333:2333 \
        --name cmlc-ai --runtime=nvidia --shm-size=8g \
        --restart=always \
        -v /root/wdd/install/ai-run/config.yaml:/cmii/cmlc-project-ai-streaming-engine/src/config.yaml \
        harbor.cdcyy.com.cn/cmii/cmlc-ai/cmlc-ai-operator:v5.4.0-v100-all

    # t4 model
    docker run -itd -p2333:2333 \
        --name cmlc-ai --runtime=nvidia --shm-size=8g \
        --restart=always \
        -v /root/wdd/ai-run/config.yaml:/cmii/cmlc-project-ai-streaming-engine/src/config.yaml \
        -v /root/wdd/ai-run/drone-t4.engine:/cmii/cmlc-project-ai-streaming-engine/models/yolov8/drone.engine \
        harbor.cdcyy.com.cn/cmii/cmlc-ai/cmlc-ai-operator:v5.4.0-v100-all
}

download_all_dependency
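
# The remaining steps are typically run by hand once the downloads finish:
#install_nvidia_docker_runtime
#start_ai_process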
20
998-常用脚本/AI安装/nvdia-docker-install-废弃.sh
Normal file
@@ -0,0 +1,20 @@
# nvidia-docker has to be installed as an add-on runtime hook
# https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html

distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \
    && curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
    && curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | \
        sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
        sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list

sudo apt-get update
apt-get install -y nvidia-container-toolkit
nvidia-ctk runtime configure --runtime=docker
systemctl restart docker
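
# Sanity check (the CUDA image tag below is only an example):
# docker run --rm --runtime=nvidia nvidia/cuda:12.0.0-base-ubuntu20.04 nvidia-smi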
24
998-常用脚本/uavcloud-被cmii_operator替代/K8S相关脚本.sh
Normal file
@@ -0,0 +1,24 @@
#!/bin/bash

# List the image of every deployment in the uavcloud-demo namespace
kubectl get deployments -n uavcloud-demo -o=jsonpath='{range .items[*]}{.spec.template.spec.containers[*].image}{"\n"}{end}'

# Save every configmap in the uavcloud-uat namespace to its own file, named after the configmap
mkdir configmaps
kubectl get configmaps -n uavcloud-uat -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | while read configmap; do
    kubectl get configmap $configmap -n uavcloud-uat -o yaml > ./configmaps/$configmap.yaml
done

# Save every ingress in the uavcloud-uat namespace to its own file, named after the ingress
mkdir ingresses
kubectl get ingress -n uavcloud-uat -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | while read ingress; do
    kubectl get ingress $ingress -n uavcloud-uat -o yaml > ./ingresses/$ingress.yaml
done
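
# To restore a saved object later (hypothetical example):
# kubectl apply -f ./configmaps/<name>.yaml -n uavcloud-uat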

# Delete every pod whose status is not Running/Completed, across all namespaces
kubectl get pods --all-namespaces | awk '{ if($3 != "Running" && $3 != "Completed") print "kubectl delete pod " $2 " -n " $1 }' | sh

# List the name of every deployment in the demo namespace
kubectl get deployments -n uavcloud-demo -o=jsonpath='{.items[*].metadata.name}'
1
998-常用脚本/uavcloud-被cmii_operator替代/k8s-消灭全部状态不正常的Pod.sh
Normal file
@@ -0,0 +1 @@
#!/bin/bash
39
998-常用脚本/uavcloud-被cmii_operator替代/uavcloud-删除状态为异常的Pod.sh
Normal file
@@ -0,0 +1,39 @@
#!/bin/bash

all_unhealthy_pod_list=""

all_cmii_name_space=(uavcloud-test uavcloud-feature uavcloud-uat uavcloud-dev uavcloud-devflight uavcloud-devoperation)
clean_up_log="clean_unhealthy_pod_log_$(date +'%Y-%m-%d-%H-%M-%S')"
mkdir -p /root/wdd
touch "/root/wdd/${clean_up_log}"
echo "clean log is => /root/wdd/$clean_up_log"

echo ""
for name_space in "${all_cmii_name_space[@]}"; do
    echo "[NAMESPACE] - start to deal with namespace [$name_space]"
    if ! kubectl get ns "$name_space"; then
        echo "[NAMESPACE] - namespace of [$name_space] does not exist !"
        echo ""
        continue
    fi
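
    # A pod whose phase is neither Running nor Succeeded is treated as unhealthy here.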

    # shellcheck disable=SC2207
    all_unhealthy_pod_list=($(kubectl get pods --field-selector=status.phase!=Running,status.phase!=Succeeded -n "$name_space" -o=jsonpath='{.items[*].metadata.name}' | tr " " "\n"))
    for unhealthy_pod in "${all_unhealthy_pod_list[@]}"; do
        echo ""
        if ! echo "$unhealthy_pod" | grep -Eq "mysql|redis|rabbit|mongo|nacos|srs"; then
            clean_log="[clean_up] start to clean namespace [$name_space] unhealthy pod of => $unhealthy_pod"
            echo "${clean_log}" >>"/root/wdd/$clean_up_log"
            echo "${clean_log}"
            kubectl -n "$name_space" delete pod "$unhealthy_pod" --force
        else
            clean_log="[clean_up] namespace [$name_space] unhealthy pod of => $unhealthy_pod was not cleaned !"
            echo "${clean_log}" >>"/root/wdd/$clean_up_log"
            echo "${clean_log}"
        fi
    done

    echo "[NAMESPACE] - accomplished!"
    echo ""
done
94
998-常用脚本/uavcloud-被cmii_operator替代/uavcloud-副本数量.sh
Normal file
@@ -0,0 +1,94 @@
#!/bin/bash

# Set the namespace
NAMESPACE="xmyd"

desired_deployment_name="cmii"

change_to_integration() {
    echo "[change_to_integration] - namespace set to uavcloud-test!"
    NAMESPACE="uavcloud-test"
    echo ""
}

change_to_validation() {
    echo "[change_to_validation] - namespace set to uavcloud-feature!"
    NAMESPACE="uavcloud-feature"
    echo ""
}

change_to_uat() {
    echo "[change_to_uat] - namespace set to uavcloud-uat!"
    NAMESPACE="uavcloud-uat"
    echo ""
}

restart_deployment() {
    local need_to_scale_up_deployments
    # shellcheck disable=SC2207
    need_to_scale_up_deployments=($(kubectl get deployments -n $NAMESPACE -o=jsonpath='{.items[*].metadata.name}' | tr " " "\n"))

    local deployment_name
    local desired_replica_count
    for deployment_name in "${need_to_scale_up_deployments[@]}"; do
        if echo "$deployment_name" | grep -qE "${desired_deployment_name}"; then
            echo "[RESTART] - namespace of [$NAMESPACE] deployment of [$deployment_name] needs a RESTART !"
            echo ""
            desired_replica_count=$(kubectl -n "$NAMESPACE" get deployment "$deployment_name" -o=jsonpath='{.spec.replicas}')
            kubectl scale deployment "$deployment_name" --replicas=0 -n "$NAMESPACE"
            echo "[SCALE] - namespace of [$NAMESPACE] deployment of [$deployment_name] desired_replica_count is $desired_replica_count"
            kubectl scale deployment "$deployment_name" --replicas="${desired_replica_count}" -n "$NAMESPACE"
            echo ""
        fi
    done
}

change_replica_of_deployment() {
    if [ "$1" == "" ]; then
        echo "[change_replica_of_deployment] - desired_replica_count is null , can not execute !"
        return 233
    fi
    local desired_replica_count
    desired_replica_count=$1

    if [ "${desired_deployment_name}" == "" ]; then
        echo "[change_replica_of_deployment] - WARNING all deployments of [$NAMESPACE] will be SCALED to => [$desired_replica_count] !!"
    fi

    local all_deployment_name_list
    # shellcheck disable=SC2207
    all_deployment_name_list=($(kubectl get deployments -n $NAMESPACE -o=jsonpath='{.items[*].metadata.name}' | tr " " "\n"))

    local deployment_name
    for deployment_name in "${all_deployment_name_list[@]}"; do
        if echo "$deployment_name" | grep -qE "${desired_deployment_name}"; then
            echo ""
            echo "[SCALE] - namespace of [$NAMESPACE] deployment of [$deployment_name] SCALE TO => [$desired_replica_count]"
            kubectl scale deployment "$deployment_name" --replicas="${desired_replica_count}" -n "$NAMESPACE"
            echo ""
        fi
    done
}

main() {
    # Pick the target environment
    change_to_validation

    # Fuzzy match on deployment names;
    # an empty value matches every deployment in the namespace
    # desired_deployment_name="bitbu"

    # Restart a deployment
    # restart_deployment

    # Change the replica count of the matched deployments
    change_replica_of_deployment 1
}

main
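
# Example (hypothetical): scale every deployment whose name contains "gateway"
# in uavcloud-uat to 2 replicas:
#   change_to_uat
#   desired_deployment_name="gateway"
#   change_replica_of_deployment 2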
@@ -0,0 +1,27 @@
#!/bin/bash

all_cmii_name_space=(uavcloud-test uavcloud-feature uavcloud-dev uavcloud-devflight uavcloud-devoperation)

for name_space in "${all_cmii_name_space[@]}"; do
    echo "[NAMESPACE] - start to deal with namespace [$name_space]"
    if ! kubectl get ns "$name_space"; then
        echo "[NAMESPACE] - namespace of [$name_space] does not exist !"
        echo ""
        continue
    fi
    echo ""

    # shellcheck disable=SC2207
    all_zero_replicas_deployment_list=($(kubectl get deployments -n "$name_space" -o=jsonpath='{.items[?(@.spec.replicas==0)].metadata.name}' | tr " " "\n"))

    for deployment_name in "${all_zero_replicas_deployment_list[@]}"; do
        if echo "$deployment_name" | grep -q "cmii"; then
            echo "[SCALE] - going to scale [$name_space] [$deployment_name] to 1 !!"
            kubectl scale deployment "$deployment_name" --replicas=1 -n "$name_space"
            echo ""
        fi
    done

    echo ""
done
@@ -0,0 +1,11 @@
#!/bin/bash

namespace="uavcloud-demo"
configmaps=$(kubectl get configmaps -n $namespace | grep tenant | awk '{print $1}')

for configmap in $configmaps
do
    echo "ConfigMap: $configmap"
    kubectl get configmap $configmap -n $namespace -o yaml
    echo "---------------------"
done
20
998-常用脚本/uavcloud-被cmii_operator替代/uavcloud-消灭重启次数过多的应用.sh
Normal file
@@ -0,0 +1,20 @@
#!/bin/bash

#all_cmii_name_space=(uavcloud-test uavcloud-feature uavcloud-uat uavcloud-dev uavcloud-devflight uavcloud-devoperation)
all_cmii_name_space=(uavcloud-test uavcloud-feature uavcloud-dev uavcloud-devflight uavcloud-devoperation)

echo ""
for name_space in "${all_cmii_name_space[@]}"; do
    echo "[NAMESPACE] - start to deal with namespace [$name_space]"
    if ! kubectl get ns "$name_space"; then
        echo "[NAMESPACE] - namespace of [$name_space] does not exist !"
        echo ""
        continue
    fi
    echo ""
    kubectl get pods --namespace="${name_space}" --sort-by='.status.containerStatuses[].restartCount' | awk '$4 > 30 {print $1} ' | sed 's/-[a-z0-9]\{9,10\}-[a-z0-9]\{5\}$//' | xargs -I {} kubectl scale -n "${name_space}" --replicas=0 deployment {}
    echo ""

done
24
998-常用脚本/uavcloud-被cmii_operator替代/批量删除.sh
Normal file
@@ -0,0 +1,24 @@
#!/bin/bash

batch_delete_pod() {
    local namespace=$1
    local app_name_prefix=$2
    if [ "$namespace" == "" ]; then
        echo "namespace is null"
        return
    fi
    if [ "$app_name_prefix" == "" ]; then
        echo "app_name_prefix is null"
        return
    fi

    local app_pod_list=($(kubectl -n "$namespace" get pods | grep "$app_name_prefix" | awk '{print$1}' | tr "\n" " "))

    for app in "${app_pod_list[@]}"; do
        echo "app is ${app}"
        kubectl -n "$namespace" delete pod $app
    done
}
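
# Usage: batch_delete_pod <namespace> <pod-name-prefix>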

batch_delete_pod kube-system flannel
29
998-常用脚本/uavcloud-被cmii_operator替代/重启所有cmii的pod.sh
Normal file
@@ -0,0 +1,29 @@
#!/usr/bin/env bash

name_space=uavcloud-dev

delete_all_fronted_cmii_pod(){
    all_pod_list=($(kubectl get pods -n "$name_space" -o=jsonpath='{.items[*].metadata.name}' | tr " " "\n"))
    for pod in "${all_pod_list[@]}"; do
        if echo "$pod" | grep -q "cmii-uav-platform"; then
            echo ""
            echo "current pod is $pod"
            kubectl -n "$name_space" delete pod "$pod" --force
        fi
    done
}

delete_all_backend_cmii_pod(){
    all_pod_list=($(kubectl get pods -n "$name_space" -o=jsonpath='{.items[*].metadata.name}' | tr " " "\n"))
    for pod in "${all_pod_list[@]}"; do
        if echo "$pod" | grep -v "platform" | grep -q "cmii"; then
            echo ""
            echo "current pod is $pod"
            kubectl -n "$name_space" delete pod "$pod" --force
        fi
    done
}
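
# Only the backend pods are restarted by default; uncomment to also restart the frontend:
#delete_all_fronted_cmii_pod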

delete_all_backend_cmii_pod
20
998-常用脚本/uavcloud-被cmii_operator替代/镜像sha256提取.sh
Normal file
@@ -0,0 +1,20 @@
#!/bin/bash

namespace=uavcloud-demo
app_name_list=(uav-gateway cmii-uav-notice cmii-uas-gateway cmii-uas-lifecycle uav-platform-uas)

echo ""
echo "current namespace is $namespace"
echo ""

for app in "${app_name_list[@]}"; do
    echo "current app is $app"
    pod_name=$(kubectl -n ${namespace} get pods | grep ${app} | awk '{print$1}' | head -n1)

    echo "pod name of app is => $pod_name"
    kubectl -n ${namespace} describe pod ${pod_name} | grep "Image ID:" | awk '{print $3}'

    echo ""
done
52
998-常用脚本/备份脚本/备份命名空间.sh
Normal file
@@ -0,0 +1,52 @@
#!/bin/bash

namespace=xmyd

install_yq() {
    wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/yq_linux_amd64 -O /usr/local/bin/yq
    chmod +x /usr/local/bin/yq

    echo ""
}

backup_all_deployment() {
    echo "start to back up all deployments "
    kubectl get deployments -n ${namespace} -o yaml | yq eval '.items[] | del( .metadata.managedFields, .metadata.annotations, .metadata.generation, .metadata.creationTimestamp, .metadata.uid, .metadata.resourceVersion, .status)' - >all-deployment-$namespace.yaml
    sed -i '/^apiVersion:/i ---' all-deployment-$namespace.yaml
    echo " done !"
    echo ""
}

backup_all_service() {
    echo "start to back up all services "
    kubectl get services -n ${namespace} -o yaml | yq eval '.items[] | del(.metadata.annotations,.metadata.managedFields, .metadata.selfLink, .metadata.creationTimestamp, .metadata.uid, .metadata.resourceVersion, .status)' - >all-service-$namespace.yaml
    sed -i '/^apiVersion:/i ---' all-service-$namespace.yaml
    echo " done !"
    echo ""
}

backup_all_configmap() {
    echo "start to back up all configmaps "
    kubectl get configmaps -n ${namespace} -o yaml | yq eval '.items[] |del(.metadata.annotations, .metadata.managedFields, .metadata.selfLink, .metadata.creationTimestamp, .metadata.uid, .metadata.resourceVersion)' - >all-configmaps-$namespace.yaml
    sed -i '/^apiVersion:/i ---' all-configmaps-$namespace.yaml
    echo " done !"
    echo ""
}

backup_all_stateful_sets() {
    echo "start to back up all stateful sets ! "
    kubectl get statefulsets -n ${namespace} -o yaml | yq eval '.items[] |del(.metadata.annotations, .metadata.managedFields, .metadata.selfLink, .metadata.creationTimestamp,.metadata.generation, .metadata.uid, .metadata.resourceVersion, .status)' - >all-statefulsets-$namespace.yaml
    sed -i '/^apiVersion:/i ---' all-statefulsets-$namespace.yaml
    echo " done !"
    echo ""
}

#install_yq
backup_all_deployment
backup_all_service
backup_all_stateful_sets
backup_all_configmap
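
# To restore from one of these backups later (the --- separators make each file
# a multi-document YAML, so a single apply is enough):
# kubectl apply -f all-deployment-$namespace.yaml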

# https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64
14
998-常用脚本/存储空间清理脚本/服务器常用目录空间检查.sh
Normal file
@@ -0,0 +1,14 @@
#!/bin/bash

common_dir_list=(/var/lib/docker/ /var/log/ /root/ /data /home)

# Walk each directory up to 6 levels deep and list the 10 biggest entries

for dir in "${common_dir_list[@]}"
do
    echo "start to find disk usage of $dir"
    find "$dir" -mindepth 1 -maxdepth 6 -exec du -sh {} + | sort -hr | head -n 10
    echo ""
done
49
998-常用脚本/容器镜像ARM/ARM版本的镜像仓库.sh
Normal file
@@ -0,0 +1,49 @@
export QUAY=/var/lib/docker/quay

mkdir -p $QUAY/postgres
setfacl -m u:26:-wx $QUAY/postgres

docker run -d --name postgresql \
    -e POSTGRES_USER=user \
    -e POSTGRES_PASSWORD=pass \
    -e POSTGRES_DB=quay \
    -p 5432:5432 \
    -v $QUAY/postgres:/var/lib/postgresql/data:Z \
    postgres:10.12

docker exec -it postgresql /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS pg_trgm" | psql -d quay -U user'

docker inspect -f "{{.NetworkSettings.IPAddress}}" postgresql

docker run -it --name redis \
    -p 6379:6379 \
    redis:6.2.14 \
    redis-server --test-memory

docker inspect -f "{{.NetworkSettings.IPAddress}}" redis

docker run --rm -it --name quay_config -p 8080:8080 quay.io/projectquay/quay:3.11.1 config secret

mkdir $QUAY/storage
mkdir $QUAY/config
setfacl -m u:1001:-wx $QUAY/storage
setfacl -m u:1001:-wx $QUAY/config

docker run -p 8033:8080 \
    --name=quay \
    --privileged=true \
    -v $QUAY/config:/conf/stack:Z \
    -v $QUAY/storage:/datastorage:Z \
    -d quay.io/projectquay/quay:3.11.1

docker run -d --name redis \
    -p 6379:6379 \
    -m 4g \
    redis:6.2.14 \
    --requirepass strongpassword

docker login --tls-verify=false quay:8033

V2ryStr@ngPss
5
998-常用脚本/容器镜像ARM/Harbor离线版本.txt
Normal file
@@ -0,0 +1,5 @@

https://github.com/wise2c-devops/build-harbor-aarch64
70
998-常用脚本/容器镜像ARM/quary-deployment.yaml
Normal file
@@ -0,0 +1,70 @@
version: "3.7"
services:

  quay-db:
    container_name: quay-db
    image: postgres:12.1
    environment:
      POSTGRES_USER: "quay"
      POSTGRES_PASSWORD: "quay"
      POSTGRES_DB: "quay"
    volumes:
      - "./local-dev/init/pg_bootstrap.sql:/docker-entrypoint-initdb.d/pg_bootstrap.sql"
    ports:
      - "5432:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U quay -d quay"]
      interval: 10s
      timeout: 9s
      retries: 3
      start_period: 10s

  clair-db:
    container_name: clair-db
    image: postgres:12.1
    environment:
      POSTGRES_USER: "clair"
      POSTGRES_DB: "clair"
    ports:
      - "5433:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U clair -d clair"]
      interval: 10s
      timeout: 9s
      retries: 3
      start_period: 10s

  redis:
    container_name: quay-redis
    image: redis:latest
    ports:
      - "6379:6379"

  quay:
    container_name: quay-quay
    image: quay.io/projectquay/quay:3.11.1
    volumes:
      - "/var/lib/docker/quay:/quay-registry"
    ports:
      - "8080:8080"
      - "8443:8443"
    environment:
      DEBUGLOG: "true"
      IGNORE_VALIDATION: "true"

  # clair is configured to share its network
  # namespace with quay. this allows quay to serve
  # layers to clair over localhost.
  clair:
    container_name: quay-clair
    image: quay.io/projectquay/clair:4.0.0-rc.22
    volumes:
      - "/var/lib/docker/quay-clair:/src/clair/"
    environment:
      CLAIR_CONF: "/src/clair/config.yaml"
      CLAIR_MODE: "combo"
    network_mode: "service:quay"
    command:
      ["bash", "-c", "cd /src/clair/cmd/clair; go run -mod vendor ."]
    depends_on:
      - quay
BIN
998-常用脚本/容器镜像ARM/quay-config.tar.gz
Normal file
Binary file not shown.
43
998-常用脚本/工具脚本/lap从35-71同步所有得离线镜像.sh
Normal file
@@ -0,0 +1,43 @@
#!/bin/bash

target_host="192.168.35.71"
gzip_folder_prefix="/root/octopus_image"

fetch_all_dep_gzip_file() {
    mkdir -p $gzip_folder_prefix/middle/
    mkdir -p $gzip_folder_prefix/rke/

    echo "start to scp all middle gzip files!"
    scp -r -P 22 root@"${target_host}":$gzip_folder_prefix/middle/ $gzip_folder_prefix/
    echo ""
    echo "start to scp all rke gzip files!"
    scp -r -P 22 root@"${target_host}":$gzip_folder_prefix/rke/ $gzip_folder_prefix/
    echo ""

    ls "$gzip_folder_prefix/middle/"
    echo ""
    ls "$gzip_folder_prefix/rke/"
    echo ""
}

fetch_project_gzip_file() {
    local project_name="$1"
    if [ "$project_name" == "" ]; then
        echo "[fetch_project_gzip_file] no project name specified ! exiting ! ${project_name}"
        return
    fi

    mkdir -p "$gzip_folder_prefix/${project_name}/"
    echo "start to scp all ${project_name} gzip files!"
    scp -r -P 22 root@"${target_host}":$gzip_folder_prefix/"${project_name}"/ $gzip_folder_prefix/
    echo ""

    echo ""
    ls $gzip_folder_prefix/"${project_name}"/*
    echo ""
}

#fetch_all_dep_gzip_file
fetch_project_gzip_file "cqga"
7
998-常用脚本/工具脚本/ubuntu下载离线安装包.sh
Normal file
@@ -0,0 +1,7 @@
#!/bin/bash

version_suffix="$(cat /etc/*-release | grep -E 'PRETTY_NAME(_LIKE)?=' | cut -d "=" -f2 | tr " " "-" | sed 's/["()]//g').tar.gz"
offline_folder_prefix="/root/wdd/offline/"

# everything is kept together in this one script
140
998-常用脚本/工具脚本/下载离线安装包.sh
Normal file
@@ -0,0 +1,140 @@
#!/bin/bash

version_suffix="$(cat /etc/*-release | grep -E 'PRETTY_NAME(_LIKE)?=' | cut -d "=" -f2 | tr " " "-" | sed 's/["()]//g').tar.gz"
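# On Ubuntu 20.04, for example, version_suffix resolves to something like
# "Ubuntu-20.04.6-LTS.tar.gz" (the exact value depends on /etc/*-release).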
offline_folder_prefix="/root/wdd/offline/"

centos_common_tool_offline() {
    local common_tool_folder_prefix="/root/wdd/offline/common_tool"

    mkdir -p $common_tool_folder_prefix

    local item_list=(deltarpm net-tools iputils bind-utils lsof curl wget vim mtr htop screen unzip git openssl iftop tar zsh)
    local item
    for item in "${item_list[@]}"; do
        echo "current tool is => ${item}"
        sudo yum install -y --downloadonly --downloaddir=$common_tool_folder_prefix "${item}"
        echo ""
    done

    ls $common_tool_folder_prefix
}

centos_nfs_utils_offline() {
    local nfs_utils_folder_prefix="${offline_folder_prefix}nfs_utils"

    mkdir -p $nfs_utils_folder_prefix

    sudo yum install -y --downloadonly --downloaddir=$nfs_utils_folder_prefix nfs-utils

    ls $nfs_utils_folder_prefix
}

ubuntu_nfs_server_offline(){
    local nfs_utils_folder_prefix="${offline_folder_prefix}nfs_kernel_server"

    mkdir -p $nfs_utils_folder_prefix

    cd $nfs_utils_folder_prefix
    pwd
    apt-get download $(apt-rdepends nfs-kernel-server | grep -v "^ " | sed 's/debconf-2.0/debconf/g')

    ls $nfs_utils_folder_prefix
}

ubuntu_nfs_common_offline(){
    local nfs_utils_folder_prefix="${offline_folder_prefix}nfs_common"

    mkdir -p $nfs_utils_folder_prefix

    cd $nfs_utils_folder_prefix
    pwd
    apt-get download $(apt-rdepends nfs-common | grep -v "^ " | sed 's/debconf-2.0/debconf/g')

    ls $nfs_utils_folder_prefix
}

centos_download_tool() {
    local tool_name=$1
    if [ "$tool_name" == "" ]; then
        echo "[centos_download_tool] no tool specified ! exiting ! ${tool_name}"
        return
    fi

    local tool_folder_prefix="/root/wdd/offline/$tool_name"
    mkdir -p "$tool_folder_prefix"
    rm -rf "$tool_folder_prefix"/*

    yum install -y --downloadonly --downloaddir="$tool_folder_prefix" "${tool_name}"

    echo ""
    ls "${tool_folder_prefix}"
    echo ""
}

gzip_default_folder_split() {
    if ! command -v "tar" &>/dev/null; then
        rpm -ivh $offline_folder_prefix/common_tool/tar*.rpm
    fi

    cd ${offline_folder_prefix} || return
    pwd
    echo ""

    local folder
    for folder in $(ls $offline_folder_prefix); do
        echo "the current folder is ${folder}"
        if [[ ! -d $folder ]]; then
            continue
        fi
        echo ""
        cd ${offline_folder_prefix} || exit
        tar -czvf $folder-$version_suffix ./$folder/*
        echo ""
        echo ""
        ls | grep $version_suffix
    done
}

test_base_command_exits() {
    local base_command_list=(ifconfig mtr vgdisplay nslookup vim htop tar unzip iftop curl wget netstat git zsh)
    local command
    for command in "${base_command_list[@]}"; do
        if command -v "$command" &>/dev/null; then
            echo "$command exists"
        else
            echo "ERROR $command does not exist!"
        fi
        echo ""
    done
}

test_service_exists(){
    local base_service_list=(ntpd chronyd nginx nfs-server rpcbind docker)
    local service
    for service in "${base_service_list[@]}"; do
        if ! systemctl list-unit-files | grep "$service.service"; then
            echo "ERROR $service.service does not exist!"
        fi
        echo ""
    done
}

mkdir -p ${offline_folder_prefix}
#centos_common_tool_offline
#centos_nfs_utils_offline
#
#centos_download_tool "ntp"
#centos_download_tool "nginx"

ubuntu_nfs_server_offline
ubuntu_nfs_common_offline
#gzip_default_folder_split

#test_base_command_exits
#test_service_exists
103
998-常用脚本/常用脚本.sh
Normal file
@@ -0,0 +1,103 @@
# Download all offline files

wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/kebite-4.1.6.tar.gz

wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/v4.1.6/middleware-images.tar.gz
wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/v4.1.6/rancher-1.20.4-image.tar.gz

wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/rke_linux-aarch64-1.2.6
wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/kubectl-1.20.4-0.aarch64.rpm

wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/cmlc-zzzsj.tar.gz
wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/offline_map-2023-12-18.zip

wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/frontend-arm.tar.gz

wget --no-check-certificate https://oss.demo.uavcmlc.com:18000/cmlc-installation/cmlc-srs.tar.gz
wget --no-check-certificate https://oss.demo.uavcmlc.com:18000/cmlc-installation/docker_release_ts2mp4_arm64.tar.gz
wget --no-check-certificate https://oss.demo.uavcmlc.com:18000/cmlc-installation/ts2mp4_docker_image_arm64_v1.0.0.tar.gz

wget https://oss.demo.uavcmlc.com/cmlc-installation/v4.1.6/离线地图.zip

wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/v4.1.6/mysql-8.2.0-linux-glibc2.28-aarch64.tar.xz

# Copy files to a batch of hosts
ip_list=(172.10.125.6 172.10.125.141 172.10.125.120)
for ip in "${ip_list[@]}"; do

    echo "yes
yes
" | scp /etc/ssh/sshd_config root@${ip}:/etc/ssh/sshd_config

    ssh root@${ip} "systemctl restart sshd"
done

ip_list=(10.20.1.133 10.20.1.134 10.20.1.132)
for ip in "${ip_list[@]}"; do
    scp /etc/docker/daemon.json root@${ip}:/etc/docker/daemon.json
    # scp /etc/ssh/sshd_config root@${ip}:/etc/ssh/sshd_config

    ssh root@${ip} "systemctl restart docker"
    # ssh root@${ip} "systemctl restart sshd"
done

# vim /etc/docker/daemon.json
# {
#   "insecure-registries" : ["172.10.125.92:8033","harbor.cq-mlkj.com"]
# }
systemctl restart docker

list=(iptables)

for Packages in "${list[@]}"
do
    apt-get download $(apt-cache depends --recurse --no-recommends --no-suggests --no-conflicts --no-breaks --no-replaces --no-enhances ${Packages} | grep "^\w" | sort -u)
done

sudo dpkg -i ./containerd.io_1.6.15-1_amd64.deb \
    ./docker-ce-cli_20.10.10~3-0~ubuntu-focal_amd64.deb \
    ./docker-ce_20.10.10~3-0~ubuntu-focal_amd64.deb \
    ./docker-ce-rootless-extras_20.10.10~3-0~ubuntu-focal_amd64.deb \
    ./docker-buildx-plugin_0.11.1-1~ubuntu.20.04~focal_amd64.deb \
    ./docker-compose-plugin_2.19.1-1~ubuntu.20.04~focal_amd64.deb

rpcinfo -p localhost

# Generate an ed25519 ssh key
ssh-keygen -t ed25519 -f .ssh/id_ed25519 -C "m@github"

echo $(cat .ssh/id_ed25519.pub)

echo "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHXDzet+Z2/AmrzIZpPviz7Z9AMxLWiJUOwtj/3NPauk m@github" >>.ssh/authorized_keys

# Change the interface that calico-node health checks probe
kubectl -n kube-system edit daemonset calico-node
# env:
#   - name: FELIX_INTERFACEPREFIX
#     value: "eth0"

# Force-delete every calico-node pod
for calico in $(kubectl -n kube-system get pods | grep calico-node | awk '{print$1}');do
    echo "calico pod is => $calico"
    kubectl -n kube-system delete pod $calico
done

# Force-delete every flannel pod
for flannel in $(kubectl -n kube-system get pods | grep kube-flannel | awk '{print$1}');do
    echo "flannel pod is => $flannel"
    kubectl -n kube-system delete pod $flannel
done

./mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306

redis-cli -h 127.0.0.1 -a Mcache@4522

curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"rancher","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://172.10.125.92:8033/api/v2.0/projects
10
998-常用脚本/批量复制脚本
Normal file
@@ -0,0 +1,10 @@
#!/bin/bash

ip_list=(10.20.1.133 10.20.1.134)

for ip in "${ip_list[@]}"; do

    echo "yes
yes
" | scp fuse-overlayfs-1.7.1-2.x86_64.rpm slirp4netns-1.2.0-1.oe2203sp2.x86_64.rpm root@${ip}:/root/
done
15
998-常用脚本/故障恢复脚本/删除状态不为Running的Pod.sh
Normal file
@@ -0,0 +1,15 @@
#!/bin/bash

wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/jq-linux-amd64 -O /usr/local/bin/jq
chmod +x /usr/local/bin/jq

export name_space=xmyd

kubectl delete pods -n $name_space --field-selector status.phase!=Running --force
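
# Also force-delete pods whose first container reports not-ready: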
kubectl get pods -n $name_space -o json | jq -r '.items[] | select(.status.containerStatuses[0].ready == false) | .metadata.name' | xargs -r kubectl delete pod -n $name_space --force

kubectl -n ${name_space} delete pod helm-nacos-0 --force
26
998-常用脚本/数据库备份脚本.sh
Normal file
@@ -0,0 +1,26 @@
#!/bin/bash

SQL_DUMP_FILE=/root/all_tables_5.2.0.sql
./mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 -e 'show databases;' | grep -Ev 'Database|information_schema|mysql|sys|performance_schema' | xargs ./mysqldump -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 --single-transaction --source-data=2 --hex-blob --triggers --routines --events --no-data --set-gtid-purged=OFF --databases > "${SQL_DUMP_FILE}"
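
# The dump above is schema-only (--no-data); the next one is a full backup including data.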
SQL_FULL_BACK_UP_FILE=/home/mmc/all_tables_4.0.2_230914_fullback.sql
./mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 -e 'show databases;' | grep -Ev 'Database|information_schema|mysql|sys|performance_schema' | xargs ./mysqldump -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 --single-transaction --source-data=2 --hex-blob --triggers --routines --events --set-gtid-purged=OFF --databases > "${SQL_FULL_BACK_UP_FILE}"

# Import script

NEW_UPDATED_SQL_FILE_FOLDER=/root/database/123
for sql_file in $(ls ${NEW_UPDATED_SQL_FILE_FOLDER} | sort -n); do
    echo "current file is $NEW_UPDATED_SQL_FILE_FOLDER/${sql_file}"
    ./mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 <"$NEW_UPDATED_SQL_FILE_FOLDER/${sql_file}"
    echo "------------------"
    echo ""
done

./mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306
12
998-常用脚本/更新deplyment的tag号.sh
Normal file
@@ -0,0 +1,12 @@
#!/bin/bash

export name_space=ly
export new_tag=5.1.0
export deployment_name=cmii-uav-tower

image_prefix=$(kubectl -n ${name_space} get deployment ${deployment_name} -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)

echo "image prefix is => ${image_prefix}"

kubectl -n ${name_space} patch deployment ${deployment_name} -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${deployment_name}\",\"image\": \"${image_prefix}:${new_tag}\"}]}}}}"
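
# Optionally wait for the rollout to finish:
# kubectl -n ${name_space} rollout status deployment ${deployment_name}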
68
998-常用脚本/更新脚本/一键更新Tag脚本.sh
Normal file
@@ -0,0 +1,68 @@
#!/bin/bash

harbor_host=192.168.118.14:8033
namespace=jsntejpt
app_name=""
new_tag=""

upload_image_to_harbor(){
    if [ "$app_name" == "" ]; then
        echo "app name is null, exit!"
        exit 233
    fi

    if ! docker load < "$1"; then
        echo "docker load error !"
    fi
    docker tag "harbor.cdcyy.com.cn/cmii/$app_name:$new_tag" "$harbor_host/cmii/$app_name:$new_tag"
    echo ""
    echo ""
    echo "upload_image_to_harbor - start to push to => $harbor_host/cmii/$app_name:$new_tag"
    docker login -u admin -p V2ryStr@ngPss $harbor_host
    docker push "$harbor_host/cmii/$app_name:$new_tag"
    echo ""
    echo ""
}

parse_args(){
    if [ "$1" == "" ]; then
        echo "no zip file given, error!"
        exit 233
    fi
    local image_name="$1"

    # cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
    app_name=$(echo $image_name | cut -d "=" -f1)
    new_tag=$(echo $image_name | cut -d "=" -f2)
}

update_image_tag(){
    if [ "$new_tag" == "" ]; then
        echo "new tag error!"
        exit 233
    fi

    local image_prefix=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)

    echo "image prefix is => ${image_prefix}"

    echo "start to update ${namespace} ${app_name} to ${new_tag} !"
    echo ""
    kubectl -n ${namespace} patch deployment "${app_name}" -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${harbor_host}/cmii/$app_name:${new_tag}\"}]}}}}"
    echo ""
    echo "start to wait for 3 seconds!"
    sleep 3
    local image_new=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}')
    echo ""
    echo "new image is => $image_new"
    echo ""
}

main(){
    parse_args "$1"
    upload_image_to_harbor "$1"
    update_image_tag
}

main "$@"
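
# Usage (hypothetical image archive following the app=tag=date=build naming convention):
# bash 一键更新Tag脚本.sh cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz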
17
998-常用脚本/更新脚本/副本数调整.sh
Normal file
@@ -0,0 +1,17 @@
#!/bin/bash

name_space=xmyd

kubectl get deployments -n ${name_space} -o custom-columns='NAME:.metadata.name,REPLICAS:.spec.replicas' --no-headers > deployments_replicas.txt

scale_back_replicas(){
    while IFS= read -r line; do
        name=$(echo "$line" | awk '{print $1}')
        replicas=$(echo "$line" | awk '{print $2}')
        kubectl scale deployment "$name" --replicas="$replicas" -n ${name_space}
    done < deployments_replicas.txt
}
41
998-常用脚本/更新脚本/根据镜像全名称原地更新Tag.sh
Normal file
@@ -0,0 +1,41 @@
#!/bin/bash

full_image_name_list_path=/root/octopus_image/cmii/all-cmii-image-list.txt
name_space=xmyd
local_harbor_prefix=192.168.0.8:8033/cmii

read_and_update_cmii_image_tag(){
    linux_images=()
    while IFS= read -r i; do
        [ -z "${i}" ] && continue
        linux_images+=("${i}");
    done < "${full_image_name_list_path}"

    for i in "${linux_images[@]}"; do
        [ -z "${i}" ] && continue
        echo ""
        echo "[update] -- start of ${i}"
        case $i in
        */*/*)
            last_segment="${i##*/}"
            local app_name=$(echo $last_segment | cut -d":" -f1)
            local new_tag=$(echo $last_segment | cut -d":" -f2)
            local image_name="$local_harbor_prefix/$app_name"
            echo "[update] -- app of ${app_name} to ${new_tag} ==> $image_name:${new_tag}"

            if kubectl get deployment ${app_name} -n ${name_space} --ignore-not-found; then
                echo "Deployment exists"
                kubectl -n ${name_space} patch deployment ${app_name} -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${image_name}:${new_tag}\"}]}}}}"
            else
                echo "Deployment does not exist of ${app_name}"
            fi
            ;;
        *)
            image_name="${DockerRegisterDomain}/rancher/${i}"
            ;;
        esac
    done
}

read_and_update_cmii_image_tag
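
# all-cmii-image-list.txt is expected to hold one full image reference per line,
# e.g. 192.168.0.8:8033/cmii/cmii-uav-gateway:5.2.0 (hypothetical entry).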
31
998-常用脚本/研发环境相关/服务器基础环境.txt
Normal file
@@ -0,0 +1,31 @@
echo "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBI0+RoNm8IB+dXYwk9JkXb6a/fGXAtxj+0TOpE8t9/ZQrn2fDJuRK33W+gK+MJ8XOWWIkqiLW7/Hsjd5fpOecLQ= root@ops-01.ecs.io
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAoiW8JzVq18VHnQfS4M6uqV6rApixlV6BZ//GsoKrXL4HClc1TmPj3DOb10Fpnj2VAwW5WqWu1ELPIwJV66U+k= root@ops-02.ecs.io
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBO6IjOcco9eAvA/T7LDsylCgjKJlLrVXY1zxO1/mX/MTzVZGuAhbikFJT2ZN2Up8iED+pJwpcps3LlA1wOjQC3Q= root@ops-04.ecs.io
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC5RPqzYKLWVz+LvXaM+Jfz48nMMYRa35TB/+tG2QIxBownA/3+cC4tWFqntbtDrRcp2MHv7xzhTuwJEgHdpZbE= root@ops-03.ecs.io" >> /root/.ssh/authorized_keys

echo "192.168.34.232 vip.ecs.io
192.168.12.242 harbor.sre.cdcyy.cn
192.168.92.185 harbor.cdcyy.com.cn
192.168.39.4 harbor-qa.sre.cdcyy.cn" >> /etc/hosts

# run on ops-04
export ip=192.168.34.240

cd /root/wdd

ssh root@${ip} "mkdir -p /root/wdd"
scp /root/wdd/octopus-agent_linux_amd64 root@${ip}:/root/wdd/
scp /root/wdd/docker-amd64-20.10.15.tgz root@${ip}:/root/wdd/
scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${ip}:/root/wdd/

ssh root@${ip} "chmod +x /root/wdd/octopus-agent_linux_amd64"
ssh root@${ip} "printf 'firewall\n' | /root/wdd/octopus-agent_linux_amd64 --mode=bastion"
ssh root@${ip} "printf 'sysconfig\n' | /root/wdd/octopus-agent_linux_amd64 --mode=bastion"
ssh root@${ip} "printf 'swap\n' | /root/wdd/octopus-agent_linux_amd64 --mode=bastion"
ssh root@${ip} "printf 'selinux\n' | /root/wdd/octopus-agent_linux_amd64 --mode=bastion"
ssh root@${ip} "printf 'docker\n' | /root/wdd/octopus-agent_linux_amd64 --mode=bastion"
ssh root@${ip} "printf 'dockercompose\n' | /root/wdd/octopus-agent_linux_amd64 --mode=bastion"
37
998-常用脚本/设置apt走代理.md
Normal file
@@ -0,0 +1,37 @@
To make apt use a proxy server on Ubuntu, configure it as follows:

1. Open a terminal and go to the `/etc/apt` directory:
```shell
cd /etc/apt
```

2. Create a new file (or edit an existing one), for example `apt.conf.d/99proxy`:
```shell
sudo nano apt.conf.d/99proxy
```

3. Add the following lines to the file, replacing the address and port with those of the actual proxy server:
```shell
Acquire::http::Proxy "http://proxy-server-address:proxy-port";
Acquire::https::Proxy "http://proxy-server-address:proxy-port";
```

4. Save and close the file (Ctrl + X, then Y to confirm).

5. Any apt command should now go through the proxy, for example updating the package lists:
```shell
sudo apt update
```

Note: if the proxy server requires authentication, extend the configuration from step 3 as shown below, replacing `username` and `password` with the actual credentials:
```shell
Acquire::http::Proxy "http://username:password@proxy-server-address:proxy-port";
Acquire::https::Proxy "http://username:password@proxy-server-address:proxy-port";
```

That is all it takes to route apt through a proxy on Ubuntu. Remember to replace `proxy-server-address` and `proxy-port` with the real address and port, and add the credentials only if the proxy requires them.

```shell
Acquire::http::Proxy "http://192.168.8.102:10811";
Acquire::https::Proxy "http://192.168.8.102:10811";
```
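
To confirm that apt actually picks up the proxy (a quick check; the key names match the configuration above):
```shell
apt-config dump | grep -i proxy
```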
16
998-常用脚本/部署脚本/0-octopus-agent初始化脚本.sh
Normal file
@@ -0,0 +1,16 @@
#!/bin/bash

# internet
bash <(curl -sL http://42.192.52.227:9000/octopus/init-script-wdd.sh) --url http://42.192.52.227:9000/octopus --agent-install --offline

# no internet

export offline_minio=10.250.0.100
bash <(curl -sL http://${offline_minio}:9000/octopus/init-script-wdd.sh) --url http://${offline_minio}:9000/octopus --help

bash <(curl -sL http://${offline_minio}:9000/octopus/init-script-wdd.sh) --url http://${offline_minio}:9000/octopus --zsh --tools --cn

bash <(curl -sL http://${offline_minio}:9000/octopus/init-script-wdd.sh) --url http://${offline_minio}:9000/octopus --agent-install --offline

bash <(curl -sL http://${offline_minio}:9000/octopus/init-script-wdd.sh) --url http://${offline_minio}:9000/octopus --agent-update --offline
8
998-常用脚本/部署脚本/0-卸载挂载的磁盘.sh
Normal file
@@ -0,0 +1,8 @@
#!/bin/bash

umount /var/lib/docker

vgremove datavg

pvremove /dev/vdb1
96
998-常用脚本/部署脚本/0-挂载磁盘.sh
Normal file
@@ -0,0 +1,96 @@
#!/bin/bash

# Turn swap off
swapoff -a
cp -f /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak | grep -v swap >/etc/fstab

# echo "-----------------------------------------------------------------------"
# RootVolumeSizeBefore=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "root volume size before resize: ${RootVolumeSizeBefore}"

# echo "y


# " | lvremove /dev/mapper/centos-swap

# freepesize=$(vgdisplay centos | grep 'Free PE' | awk '{print $5}')

# lvextend -l+${freepesize} /dev/mapper/centos-root

# ## grow the XFS filesystem to the maximum available size
# xfs_growfs /dev/mapper/centos-root

# df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}'

# echo "-----------------------------------------------------------------------"
# RootVolumeSizeAfter=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "root volume size after resize: ${RootVolumeSizeAfter}"
# RootVolumeSizeBeforeNum=$(echo $RootVolumeSizeBefore | cut -d "G" -f1)
# RootVolumeSizeAfterNum=$(echo $RootVolumeSizeAfter | cut -d "G" -f1)

# echo "congratulations, the root volume grew by +++++++$(( ${RootVolumeSizeAfterNum}-${RootVolumeSizeBeforeNum} ))GB+++++"

yum install lvm2 -y
echo ""
echo ""
echo ""
echo "-----------------------------------------------------------------------"

export VG_NAME=datavg

echo "n
p



t

8e
w
" | fdisk /dev/sdb
partprobe
# If the volume group already exists, just extend it:
# vgextend /dev/mapper/centos /dev/vda3
vgcreate ${VG_NAME} /dev/sdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# Adjust the size to the actual situation
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
#mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
mkfs.ext4 /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
mkdir -p /var/lib/docker
#selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
export selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker ext4 defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a

echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"

# Grow the root volume; find the filesystem to grow (${VG_NAME}-root) via df -Th
# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
# xfs_growfs /dev/mapper/${VG_NAME}-root

# Custom variant (lvm2 already installed)
echo "n
p



t

8e
w
" | fdisk /dev/vda
partprobe
vgextend klas_host-10-190-202-141 /dev/vda4
lvextend -l +100%FREE /dev/mapper/klas_host--10--190--202--141-root
partprobe
xfs_growfs /dev/mapper/klas_host--10--190--202--141-root
df -TH
10
998-常用脚本/部署脚本/0-挂载裸盘.sh
Normal file
@@ -0,0 +1,10 @@
#!/bin/bash

sudo mkfs.ext4 /dev/vdb
sudo mkdir -p /var/lib/docker
sudo mount /dev/vdb /var/lib/docker
echo '/dev/vdb /var/lib/docker ext4 defaults 0 2' | sudo tee -a /etc/fstab
sudo mount -a
df -h
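
# Verify the filesystem and mount point:
# lsblk -f /dev/vdb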
46
998-常用脚本/部署脚本/MINIO-初始化.sh
Normal file
@@ -0,0 +1,46 @@
#!/bin/bash
# Replace the namespace
# Replace the actual MinIO address and port
# Change the actual RabbitMQ address and port; it has to be exposed

curl https://dl.min.io/client/mc/release/linux-amd64/mc -o /usr/local/bin/mc
chmod +x /usr/local/bin/mc

export tenant_name=uavcloud-dev
mc alias set ${tenant_name} https://minio.ig-dev.uavcmlc.com cmii B#923fC7mk

mc mb ${tenant_name}/jadenq ${tenant_name}/tus ${tenant_name}/thumbnail ${tenant_name}/pub-cms ${tenant_name}/live-srs-hls/ ${tenant_name}/mission/ ${tenant_name}/surveillance ${tenant_name}/playback ${tenant_name}/tower ${tenant_name}/modelprocess ${tenant_name}/srs-hls ${tenant_name}/live-cluster-hls

# mc alias set demo https://oss.demo.uavcmlc.com:18000 cmii B#923fC7mk
#
# mc cp -r demo/jadenq/scenariomock/xg/ ${tenant_name}/jadenq/scenariomock/xg/
# mc cp -r demo/jadenq/application/file/中移凌云使用手册.pdf ${tenant_name}/jadenq/application/file/中移凌云使用手册.pdf
# mc cp -r demo/jadenq/defimage/def.jpg ${tenant_name}/jadenq/defimage/def.jpg
# mc cp -r demo/pub-cms/application/img/ ${tenant_name}/pub-cms/application/img/

mc admin config set ${tenant_name} notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@10.250.0.200:35672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"

sleep 5

mc admin service restart ${tenant_name}

mc event add ${tenant_name}/mission arn:minio:sqs::1:amqp --event put

mc event add ${tenant_name}/modelprocess arn:minio:sqs::1:amqp --event put

mc event add ${tenant_name}/live-srs-hls arn:minio:sqs::1:amqp --event put

mc event add ${tenant_name}/playback arn:minio:sqs::1:amqp --event put

mc event add ${tenant_name}/live-cluster-hls arn:minio:sqs::1:amqp --event put

mc event add ${tenant_name}/surveillance arn:minio:sqs::1:amqp --event put

mc event add ${tenant_name}/tus arn:minio:sqs::1:amqp --event delete

mc ilm add --expiry-days "1" ${tenant_name}/tus
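
# Optional verification of the notification hookup:
# mc event list ${tenant_name}/mission arn:minio:sqs::1:amqp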
23
998-常用脚本/部署脚本/deploy-nfs-server.sh
Normal file
@@ -0,0 +1,23 @@
#!/bin/bash

nfs_data_path="/var/lib/docker/nfs_data"
#nfs_data_path="/data/nfs_data"

deploy_nfs_server(){
    mkdir -p $nfs_data_path
    chmod 777 $nfs_data_path

    echo "${nfs_data_path} *(rw,no_root_squash,no_all_squash,sync)" >> /etc/exports

    systemctl restart rpcbind
    systemctl restart nfs-server

    systemctl enable rpcbind
    systemctl enable nfs-server
}
deploy_nfs_server
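
# Client-side mount check (replace <nfs-server-ip> with this host's address):
# mount -t nfs <nfs-server-ip>:/var/lib/docker/nfs_data /mnt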

# docker login -u admin -p V2ryStr@ngPss 10.100.2.121:8033
111
998-常用脚本/部署脚本/install_minio.sh
Normal file
@@ -0,0 +1,111 @@
#!/bin/bash

minio_local_path=/var/lib/docker/minio-pv/pv1
harbor_host=10.20.1.135:8033
inner_master_ip=10.20.1.135
minio_host_ip=110.20.1.139

install_minio(){
    echo "start to create minio local path !"
    mkdir -p ${minio_local_path}
    chmod -R 777 ${minio_local_path}
    mkdir -p /root/wdd/install/

    cat > /root/wdd/install/minio-docker-compose.yaml <<EOF
version: '2'

services:
  minio1:
    ports:
      - "9000:9000"
      - "9001:9001"
    image: '${harbor_host}/cmii/minio:2022.5.4'
    environment:
      - MINIO_ROOT_USER=cmii
      - MINIO_ROOT_PASSWORD=B#923fC7mk
    restart: always
    volumes:
      - ${minio_local_path}:/data
EOF

    echo "start minio container !"
    docker-compose -f /root/wdd/install/minio-docker-compose.yaml up -d
    echo ""
}

install_docker_compose(){
    curl https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/docker-compose-amd64 -o /usr/local/bin/docker-compose
    chmod +x /usr/local/bin/docker-compose
}

init_minio(){
    echo "start to download mc!"
    if [[ ! -f /usr/local/bin/mc ]]; then
        curl https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/mc -o /usr/local/bin/mc
        chmod +x /usr/local/bin/mc
    fi

    # curl https://dl.min.io/client/mc/release/linux-amd64/mc -o /usr/local/bin/mc

    echo ""
    sleep 5

    export tenant_name=outside
    mc alias set ${tenant_name} http://${minio_host_ip}:9000 cmii B#923fC7mk

    mc mb ${tenant_name}/jadenq ${tenant_name}/tus ${tenant_name}/thumbnail ${tenant_name}/pub-cms ${tenant_name}/live-srs-hls/ ${tenant_name}/mission/ ${tenant_name}/surveillance ${tenant_name}/playback ${tenant_name}/tower ${tenant_name}/modelprocess ${tenant_name}/srs-hls ${tenant_name}/live-cluster-hls ${tenant_name}/geodata
    echo ""

    echo "set rabbit mq"
    mc admin config set ${tenant_name} notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@${inner_master_ip}:35672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"
    echo ""

    echo "sleep 5 s!"
    sleep 5

    mc admin service restart ${tenant_name}

    echo "sleep 5 s!"
    sleep 5
    echo ""

    echo "start to add event notification !"

    mc event add ${tenant_name}/mission arn:minio:sqs::1:amqp --event put

    mc event add ${tenant_name}/modelprocess arn:minio:sqs::1:amqp --event put

    mc event add ${tenant_name}/live-srs-hls arn:minio:sqs::1:amqp --event put

    mc event add ${tenant_name}/playback arn:minio:sqs::1:amqp --event put

    mc event add ${tenant_name}/live-cluster-hls arn:minio:sqs::1:amqp --event put

    mc event add ${tenant_name}/geodata arn:minio:sqs::1:amqp --event put

    mc event add ${tenant_name}/surveillance arn:minio:sqs::1:amqp --event put

    mc event add ${tenant_name}/tus arn:minio:sqs::1:amqp --event delete

    mc ilm add --expiry-days "1" ${tenant_name}/tus

    echo ""
    echo "done of init !"
}

#install_docker_compose

install_minio

if [[ $(docker inspect -f '{{.State.Running}}' install-minio1-1) == "true" ]]; then
    echo "minio is running now! start to init minio!"
    init_minio
fi

#init_minio
26
998-常用脚本/部署脚本/nginx暴露/在线安装nginx-centos.sh
Normal file
@@ -0,0 +1,26 @@
#!/bin/bash

sudo yum install -y yum-utils

cat >/etc/yum.repos.d/nginx.repo<<EOF
[nginx-stable]
name=nginx stable repo
baseurl=http://nginx.org/packages/centos/8/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true

[nginx-mainline]
name=nginx mainline repo
baseurl=http://nginx.org/packages/mainline/centos/8/x86_64/
gpgcheck=1
enabled=0
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
EOF

yum-config-manager --enable nginx-mainline

yum install -y nginx
19
998-常用脚本/部署脚本/nginx暴露/在线安装nginx.sh
Normal file
@@ -0,0 +1,19 @@
#!/bin/bash

sudo apt install -y curl gnupg2 ca-certificates lsb-release ubuntu-keyring

curl https://nginx.org/keys/nginx_signing.key | gpg --dearmor \
    | sudo tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null

gpg --dry-run --quiet --no-keyring --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg

echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \
http://nginx.org/packages/ubuntu `lsb_release -cs` nginx" \
    | sudo tee /etc/apt/sources.list.d/nginx.list

echo -e "Package: *\nPin: origin nginx.org\nPin: release o=nginx\nPin-Priority: 900\n" \
    | sudo tee /etc/apt/preferences.d/99nginx

sudo apt update
sudo apt install -y nginx
32
998-常用脚本/部署脚本/nginx暴露/真实nginx-offline-map.conf
Normal file
@@ -0,0 +1,32 @@
server {
    listen 8889;
    server_name localhost;
    # Origins allowed for cross-origin requests; * means all
    add_header 'Access-Control-Allow-Origin' *;
    # Allow requests that carry cookies
    add_header 'Access-Control-Allow-Credentials' 'true';
    # Allowed request methods, e.g. GET/POST/PUT/DELETE
    add_header 'Access-Control-Allow-Methods' *;
    # Allowed request headers
    add_header 'Access-Control-Allow-Headers' *;

    location /electronic {
        root /root/offline_map/;
        autoindex on;
        add_header Access-Control-Allow-Origin *;
        add_header Access-Control-Allow-Methods 'GET,POST';
        add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
    }

    location /satellite {
        root /root/offline_map/;
        autoindex on;
        add_header Access-Control-Allow-Origin *;
        add_header Access-Control-Allow-Methods 'GET,POST';
        add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
    }

    # http://192.168.6.6:8889/electronic/{z}/{x}/{y}.png
    # http://192.168.6.6:8889/satellite/{z}/{x}/{y}.png
    # /root/offline_map/satellite /root/offline_map/electronic
}
43
998-常用脚本/部署脚本/nginx暴露/真实nginx-reverse-proxy.conf
Normal file
43
998-常用脚本/部署脚本/nginx暴露/真实nginx-reverse-proxy.conf
Normal file
@@ -0,0 +1,43 @@
server {
    listen 8088;
    server_name localhost;
    location / {
        proxy_pass http://localhost:30500;
        client_max_body_size 5120m;
        client_body_buffer_size 5120m;
        client_body_timeout 6000s;
        proxy_send_timeout 10000s;
        proxy_read_timeout 10000s;
        proxy_connect_timeout 600s;
        proxy_max_temp_file_size 5120m;
        proxy_request_buffering on;
        proxy_buffering off;
        proxy_buffer_size 4k;
        proxy_buffers 4 12k;
        proxy_set_header Host fake-domain.jxejpt.io;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    location /_AMapService/v4/map/styles {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://webapi.amap.com/v4/map/styles;
    }

    location /_AMapService/ {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://restapi.amap.com/;
    }

    location /rtc/v1/ {
        add_header Access-Control-Allow-Headers X-Requested-With;
        add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
        proxy_pass http://127.0.0.1:30985/rtc/v1/;
    }

    location ~ ^/\w*/actuator/ {
        return 403;
    }
}
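# Optional check (a sketch): validate and reload after dropping this file into /etc/nginx/conf.d/
# nginx -t && nginx -s reload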
44
998-常用脚本/部署脚本/nginx暴露/真实的nginx配置.conf
Normal file
44
998-常用脚本/部署脚本/nginx暴露/真实的nginx配置.conf
Normal file
@@ -0,0 +1,44 @@
user root;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    use epoll;
    worker_connections 65535;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    server_tokens off;
    sendfile on;
    send_timeout 1200;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 600;
    types_hash_max_size 2048;

    client_max_body_size 2048m;
    client_body_buffer_size 2048m;
    underscores_in_headers on;

    proxy_send_timeout 600;
    proxy_read_timeout 600;
    proxy_connect_timeout 600;
    proxy_buffer_size 128k;
    proxy_buffers 8 256k;

    include /etc/nginx/conf.d/*.conf;
}
stream {
    include /etc/nginx/conf.d/stream/*.conf;
}
13
998-常用脚本/部署脚本/nginx暴露/纯离线部署nginx-docker-compose.yaml
Normal file
13
998-常用脚本/部署脚本/nginx暴露/纯离线部署nginx-docker-compose.yaml
Normal file
@@ -0,0 +1,13 @@
version: '3'

services:
  cmii-nginx:
    image: 10.20.1.135:8033/cmii/nginx:1.21.3
    volumes:
      - /etc/nginx/conf.d:/etc/nginx/conf.d
      - /etc/nginx/nginx.conf:/etc/nginx/nginx.conf
      - /root/offline_map:/root/offline_map
    ports:
      - "8088:8088"
      - "8089:8089"
    restart: always
45
998-常用脚本/部署脚本/nginx暴露/设置ingress-nginx.sh
Normal file
45
998-常用脚本/部署脚本/nginx暴露/设置ingress-nginx.sh
Normal file
@@ -0,0 +1,45 @@
#!/bin/bash

modify_ingress_nginx_host_network(){
    echo "start to modify hostnetwork to false !"
    kubectl patch daemonset nginx-ingress-controller -n ingress-nginx --patch '{"spec":{"template":{"spec":{"hostNetwork": false}}}}'
    echo ""
    kubectl get daemonset -n ingress-nginx nginx-ingress-controller -o jsonpath='{.spec.template.spec.hostNetwork}'
}

build_for_ingress_nginx_node_port(){
    echo "start to write ingress nginx node port service !"
    mkdir -p /root/wdd/install/
    # overwrite (not append) so a re-run does not duplicate the manifest
    cat >/root/wdd/install/k8s-ingress-nginx.yaml<<EOF
kind: Service
apiVersion: v1
metadata:
  name: ingress-nginx-service
  namespace: ingress-nginx
spec:
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: 80
      nodePort: 30500
    - name: https
      protocol: TCP
      port: 443
      targetPort: 443
      nodePort: 31500
  selector:
    app: ingress-nginx
  type: NodePort
  sessionAffinity: None
EOF
    echo ""
    kubectl apply -f /root/wdd/install/k8s-ingress-nginx.yaml
}

modify_ingress_nginx_host_network

build_for_ingress_nginx_node_port
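# Optional check (a sketch; <node-ip> is a placeholder): 80/443 should now be exposed on 30500/31500
# kubectl -n ingress-nginx get svc ingress-nginx-service
# curl -I http://<node-ip>:30500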
33
998-常用脚本/部署脚本/从离线部署机倒入脚本到服务器.sh
Normal file
33
998-常用脚本/部署脚本/从离线部署机倒入脚本到服务器.sh
Normal file
@@ -0,0 +1,33 @@
#!/bin/bash

need_to_import_sql_folder="/home/wdd/Documents/master_data_5.2.0"
target_host_ip=20.4.13.81
target_host_port=33306
target_user=root
target_passwd=QzfXQhd3bQ

full_back_up_database(){
    echo "yes"
}

batch_import_sql_struct_to_server(){
    for sql_file in $(ls ${need_to_import_sql_folder} | sort -n); do
        if [ -d $need_to_import_sql_folder/${sql_file} ];then
            continue
        fi

        echo "current file is $need_to_import_sql_folder/${sql_file}"

        /root/wdd/mysql/bin/mysql -u${target_user} -p${target_passwd} -h${target_host_ip} -P${target_host_port} <"$need_to_import_sql_folder/${sql_file}"

        echo "------------------"
        echo ""
    done
}

batch_import_sql_struct_to_server
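# Optional dry run (a sketch): list the files in the order they would be imported,
# without touching the database
# ls ${need_to_import_sql_folder} | sort -n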
38
998-常用脚本/部署脚本/安装ts2mp4.sh
Normal file
38
998-常用脚本/部署脚本/安装ts2mp4.sh
Normal file
@@ -0,0 +1,38 @@
#!/bin/bash

minio_inner_ip_host=10.129.80.223:9000

download_ts2mp4_file(){
    echo ""
    wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/ts2mp4_docker_image_v1.tar.gz
    echo ""
    echo ""
    echo ""
    wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/pack_ts2mp4_release-0521.tar.gz
}

bootup_ts2mp4(){
    if [[ -f ts2mp4_docker_image_v1.tar.gz ]]; then
        echo "start to load ts2mp4 image file !"
        docker load -i ts2mp4_docker_image_v1.tar.gz
        echo ""

        echo "init ts2mp4 config!"
        # extract the archive downloaded by download_ts2mp4_file above
        tar -zvxf pack_ts2mp4_release-0521.tar.gz

        echo "start to modify!"
        sed -i "s/https:\/\/minio.ig-uat.uavcmlc.com:31500/http:\/\/$minio_inner_ip_host/g" $(pwd)/pack_ts2mp4_release/server_config_docker.ini
        bash $(pwd)/pack_ts2mp4_release/run_docker.sh

        sleep 3
        if docker ps | grep -q ts2mp4; then
            echo "ts2mp4 started successful !"
        else
            echo "ts2mp4 FAILED!"
        fi
    fi
}

#download_ts2mp4_file
bootup_ts2mp4
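# Optional check (a sketch; assumes the container name contains "ts2mp4", as grepped above):
# docker logs --tail 50 $(docker ps -q -f name=ts2mp4)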
46
998-常用脚本/部署脚本/开机启动的脚本.txt
Normal file
46
998-常用脚本/部署脚本/开机启动的脚本.txt
Normal file
@@ -0,0 +1,46 @@
vim /etc/systemd/system/cmii-startup.service


[Unit]
Description=Cmii Start Up Script

[Service]
ExecStart=/bin/bash /cmii/start-up.sh
User=root
Group=root

[Install]
WantedBy=multi-user.target



vim /cmii/start-up.sh

docker-compose -f /cmii/harbor/docker-compose.yml up -d
sleep 10

docker-compose -f /cmii/0-minio-dockercompose.yml up -d

rm -rf /nfsdata/zhbf-helm-emqxs-pvc-fdb605a0-5120-481a-bdd5-7ef1213c2363/

sleep 5

kubectl delete -n zhbf pod helm-nacos-0 --force
kubectl delete -n zhbf pod helm-emqxs-0 --force
kubectl delete -n zhbf pod helm-redis-master-0 --force
kubectl delete -n zhbf pod helm-redis-replicas-0 --force

sleep 30

for kindof in pods
do
    kubectl -n zhbf delete $kindof $(kubectl -n zhbf get $kindof | grep "cmii"| awk '{print$1}')
done


chmod +x /cmii/start-up.sh


systemctl daemon-reload
sudo systemctl enable cmii-startup.service
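# Optional check (a sketch): start the unit once by hand and inspect its state
# systemctl start cmii-startup.service
# systemctl status cmii-startup.service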
7
998-常用脚本/部署脚本/手动创建harbor仓库.sh
Normal file
7
998-常用脚本/部署脚本/手动创建harbor仓库.sh
Normal file
@@ -0,0 +1,7 @@
export harbor_host=10.129.80.218:8033

curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"cmii","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$harbor_host/api/v2.0/projects

curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"rancher","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$harbor_host/api/v2.0/projects
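# Optional check (a sketch): list projects to confirm both were created
# curl -u "admin:V2ryStr@ngPss" http://$harbor_host/api/v2.0/projects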
57
998-常用脚本/部署脚本/清理rke集群的安装.sh
Normal file
57
998-常用脚本/部署脚本/清理rke集群的安装.sh
Normal file
@@ -0,0 +1,57 @@
#!/bin/bash

all_ip_list=(172.18.10.8 172.18.10.239 172.18.10.231 172.18.10.198)

clean_octopus_agent() {
    # remote commands are single-quoted so $(...) and $mount expand on the
    # remote host instead of the local one
    local server
    for server in "${all_ip_list[@]}"; do
        echo "the current server is ${server}"
        ssh root@"${server}" "echo yes"
        ssh root@"${server}" 'docker container stop $(docker ps -aq) && docker system prune -f'
        ssh root@"${server}" 'for mount in $(mount | grep tmpfs | grep "/var/lib/kubelet" | awk "{ print \$3 }") /var/lib/kubelet /var/lib/rancher; do umount $mount; done'
        ssh root@"${server}" "rm -rf /etc/ceph /etc/cni /etc/kubernetes /etc/rancher /opt/cni /opt/rke /run/secrets/kubernetes.io /run/calico /run/flannel /var/lib/calico /var/lib/etcd /var/lib/cni /var/lib/kubelet /var/lib/rancher /var/log/containers /var/log/kube-audit /var/log/pods /var/run/calico"
        ssh root@"${server}" "ip link delete flannel.1"
        ssh root@"${server}" "ip link delete cni0"
        ssh root@"${server}" "ip link delete tunl0"
        ssh root@"${server}" "rmmod ipip"
        ssh root@"${server}" "iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -t raw -F"
    done
}

clean_rke_cluster() {

    k8s_component=(kubelet kube-proxy kube-apiserver kube-controller-manager kube-scheduler etcd)
    for component in "${k8s_component[@]}"; do docker container stop $component && docker container rm $component; done

    for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do umount $mount; done

    rm -rf /etc/ceph \
        /etc/cni \
        /etc/kubernetes \
        /etc/rancher \
        /opt/cni \
        /opt/rke \
        /run/secrets/kubernetes.io \
        /run/calico \
        /run/flannel \
        /var/lib/calico \
        /var/lib/etcd \
        /var/lib/cni \
        /var/lib/kubelet \
        /var/lib/rancher /var/log/containers \
        /var/log/kube-audit \
        /var/log/pods \
        /var/run/calico

    ip link delete flannel.1
    ip link delete cni0
    ip link delete tunl0
    rmmod ipip

    iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -t raw -F

}

clean_rke_cluster
15
998-常用脚本/部署脚本/编辑calico状态.sh
Normal file
15
998-常用脚本/部署脚本/编辑calico状态.sh
Normal file
@@ -0,0 +1,15 @@
#!/bin/bash


# Change the interface calico-node uses for IP detection
kubectl -n kube-system edit daemonset calico-node
env:
- name: FELIX_INTERFACEPREFIX
  value: "eth0"

# More reliable alternative
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=eth0


# Delete all calico pods so they restart with the new setting
kubectl delete pods --namespace=kube-system -l k8s-app=calico-node
43
998-常用脚本/重启服务器恢复/1-重启脚本.sh
Normal file
43
998-常用脚本/重启服务器恢复/1-重启脚本.sh
Normal file
@@ -0,0 +1,43 @@
#!/bin/bash

install_prefix=/root/wdd/install

echo "Before running, make sure all VMs are up and their NICs/IPs can reach each other"

# restart the image registry
echo "Restoring the image registry"
docker-compose -f /root/wdd/harbor/docker-compose.yml up -d
echo "Waiting 10s"
sleep 10s

# stop all backend services
kubectl --kubeconfig /root/.kube/config delete -f ${install_prefix}/k8s-mysql.yaml
kubectl --kubeconfig /root/.kube/config delete -f ${install_prefix}/k8s-nacos.yaml
kubectl --kubeconfig /root/.kube/config delete -f ${install_prefix}/k8s-backend.yaml

echo "Restarting local nginx"
systemctl stop nginx && sleep 3 && systemctl start nginx
docker-compose -f ${install_prefix}/nginx-docker-compose.yaml up -d

echo "Waiting 10s, then starting all services"
kubectl --kubeconfig /root/.kube/config apply -f ${install_prefix}/k8s-mysql.yaml
echo "Waiting 360s for mysql to come up!"
sleep 360s
kubectl --kubeconfig /root/.kube/config apply -f ${install_prefix}/k8s-nacos.yaml
sleep 30s
echo "Waited 30s for nacos to come up!"


kubectl --kubeconfig /root/.kube/config apply -f ${install_prefix}/k8s-backend.yaml
echo "Waiting 10min"
sleep 10m

kubectl --kubeconfig /root/.kube/config -n bjtg get pods | grep Off | awk '{print$1}' | xargs kubectl --kubeconfig /root/.kube/config -n bjtg delete pod

echo "Waiting 2 minutes to make sure every service is fully up"
sleep 2m

kubectl --kubeconfig /root/.kube/config get pods -A

echo "Confirm that everything above shows Running"
echo "You can then log in to the platform to verify"
12
998-常用脚本/重启服务器恢复/1.1-minio-重启脚本.sh
Normal file
12
998-常用脚本/重启服务器恢复/1.1-minio-重启脚本.sh
Normal file
@@ -0,0 +1,12 @@
#!/bin/bash
echo "Before running, make sure all VMs are up and their NICs/IPs can reach each other"

# restart the minio service
echo "Restoring the MINIO service"
docker-compose -f /root/wdd/minio-docker-compose.yaml up -d

if docker ps | grep -q wdd-minio1-1; then
    echo "minio restart success !"
else
    echo "minio restart failed !"
fi
11
998-常用脚本/重启服务器恢复/2-restore_from_reboot.service
Normal file
11
998-常用脚本/重启服务器恢复/2-restore_from_reboot.service
Normal file
@@ -0,0 +1,11 @@
[Unit]
Description=Restore CMII after Reboot

[Service]
ExecStart=/root/wdd/reboot/restore_from_reboot.sh

[Install]
WantedBy=multi-user.target


# /etc/systemd/system/restore_from_reboot.service
15
998-常用脚本/重启服务器恢复/3-restore_from_reboot.timer
Normal file
15
998-常用脚本/重启服务器恢复/3-restore_from_reboot.timer
Normal file
@@ -0,0 +1,15 @@
[Unit]
Description=Run restore_from_reboot 10s after boot

[Timer]
OnBootSec=10
Unit=restore_from_reboot.service

[Install]
WantedBy=multi-user.target


# /etc/systemd/system/restore_from_reboot.timer
# sudo systemctl daemon-reload
sudo systemctl enable restore_from_reboot.timer
journalctl -u restore_from_reboot -n 150 -f
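# Optional check (a sketch): confirm the timer is registered and scheduled
# systemctl list-timers | grep restore_from_reboot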
147
998-常用脚本/镜像同步/2-imageDownSync.sh
Normal file
147
998-常用脚本/镜像同步/2-imageDownSync.sh
Normal file
@@ -0,0 +1,147 @@
#!/usr/bin/env bash

### Edit the values below ###
### Edit the values below ###
### Edit the values below ###

cmlc_app_image_list="szga-0711.txt"                       # update per release
rancher_image_list="kubernetes-images-2.5.7-1.20.4.txt"   # usually no change needed
middleware_image_list="middleware-images.txt"             # usually no change needed
#DockerRegisterDomain="20.47.129.116:8033"                # change to match the environment
DockerRegisterDomain="172.10.125.92:8033"                 # change to match the environment
HarborAdminPass=V2ryStr@ngPss                             # must stay in sync with the password in script one

### Edit the values above ###
### Edit the values above ###
### Edit the values above ###

downloadAllNeededImages() {
    while [[ $# -gt 0 ]]; do
        pulled=""
        while IFS= read -r i; do
            [ -z "${i}" ] && continue
            echo "Downloading: ${i}"
            if docker pull "${i}" >/dev/null 2>&1; then
                echo "Image pull success: ${i}"
                pulled="${pulled} ${i}"
            else
                if docker inspect "${i}" >/dev/null 2>&1; then
                    pulled="${pulled} ${i}"
                else
                    echo "Image pull failed: ${i}"
                fi
            fi
            echo "-------------------------------------------------"
        done <"${1}"
        shift
    done
}

downloadAllNeededImagesAndCompress() {
    while [[ $# -gt 0 ]]; do
        pulled=""
        while IFS= read -r i; do
            [ -z "${i}" ] && continue
            echo "Downloading: ${i}"
            if docker pull "${i}" >/dev/null 2>&1; then
                echo "Image pull success: ${i}"
                pulled="${pulled} ${i}"
            else
                if docker inspect "${i}" >/dev/null 2>&1; then
                    pulled="${pulled} ${i}"
                else
                    echo "Image pull failed: ${i}"
                fi
            fi
            echo "-------------------------------------------------"
        done <"${1}"
        compressPacName="$(echo ${1} | cut -d"." -f1).tar.gz"

        echo "Creating ${compressPacName} with $(echo ${pulled} | wc -w | tr -d '[:space:]') images"
        docker save $(echo ${pulled}) | gzip --stdout > ${compressPacName}

        shift
    done

    echo "Packaging finished!"
}

pushRKEImageToHarbor(){
    linux_images=()
    while IFS= read -r i; do
        [ -z "${i}" ] && continue
        linux_images+=("${i}")
    done < "${rancher_image_list}"

    docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}

    for i in "${linux_images[@]}"; do
        [ -z "${i}" ] && continue
        case $i in
        */*)
            image_name="${DockerRegisterDomain}/${i}"
            ;;
        *)
            image_name="${DockerRegisterDomain}/rancher/${i}"
            ;;
        esac

        echo "Pushing image to the private registry: ${image_name}"
        docker tag "${i}" "${image_name}"
        docker push "${image_name}"
        echo "-------------------------------------------------"
    done
}

pushCMLCAPPImageToHarbor(){
    app_images=()
    while IFS= read -r i; do
        [ -z "${i}" ] && continue
        app_images+=("${i}")
    done < "${cmlc_app_image_list}"

    docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
    for app in "${app_images[@]}"; do
        [ -z "${app}" ] && continue
        image_name="${DockerRegisterDomain}/$(echo ${app} | cut -d"/" -f2-8)"
        echo "Pushing image to the private registry: ${image_name}"
        docker tag "${app}" "${image_name}"
        docker push "${image_name}"
        echo "-------------------------------------------------"
    done
}

pushMiddlewareImageToHarbor(){
    middleware_image=()
    while IFS= read -r i; do
        [ -z "${i}" ] && continue
        middleware_image+=("${i}")
    done < "${middleware_image_list}"

    docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
    for app in "${middleware_image[@]}"; do
        [ -z "${app}" ] && continue
        case ${app} in
        */*/*)
            image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f3-8)"
            ;;
        */*)
            image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f2-8)"
            ;;
        esac

        echo "Pushing image to the private registry: ${image_name}"
        docker tag "${app}" "${image_name}"
        docker push "${image_name}"
        echo "-------------------------------------------------"
    done
}


downloadAllNeededImagesAndCompress "${cmlc_app_image_list}"
#downloadAllNeededImages "${rancher_image_list}"

#pushRKEImageToHarbor
#pushCMLCAPPImageToHarbor
#pushMiddlewareImageToHarbor
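# Usage sketch: the bottom of the script selects what runs; for a full offline round-trip,
# uncomment the push functions once the compressed bundles have been moved across, e.g.:
# downloadAllNeededImagesAndCompress "${cmlc_app_image_list}"   # on the online host
# pushCMLCAPPImageToHarbor                                      # on the offline host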
166
998-常用脚本/镜像同步/ImageSyncDLTU.sh
Normal file
166
998-常用脚本/镜像同步/ImageSyncDLTU.sh
Normal file
@@ -0,0 +1,166 @@
#!/bin/bash

all_image_list_txt="all-cmii-image-list.txt"    # update per release
gzip_image_list_txt="all-gzip-image-list.txt"   # usually no change needed
oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
local_gzip_path="/root/octopus_image"

DockerRegisterDomain="172.18.10.251:8033"       # change to match the environment
HarborAdminPass=V2ryStr@ngPss                   # must stay in sync with the password in script one

print_green() {
    echo -e "\033[32m${1}\033[0m"
    echo ""
}

print_red() {
    echo -e "\033[31m${1}\033[0m"
    echo ""
}

Download_Load_Tag_Upload() {
    print_green "[DLTU] - start !"
    while [[ $# -gt 0 ]]; do
        case "$1" in
        rke)
            # print_green "download rke "
            local_gzip_path="$local_gzip_path/rke"
            mkdir -p ${local_gzip_path}
            oss_prefix_url="$oss_prefix_url/rke/"
            dltu
            shift # past argument
            ;;
        middle)
            local_gzip_path="$local_gzip_path/middle"
            mkdir -p $local_gzip_path
            oss_prefix_url="$oss_prefix_url/middle/"
            dltu
            shift # past argument
            ;;
        cmii)
            local_gzip_path="$local_gzip_path/cmii"
            mkdir -p $local_gzip_path
            oss_prefix_url="$oss_prefix_url/ahejpt/"
            dltu
            shift # past argument
            ;;
        *)
            # unknown option
            print_red "bad arguments"
            ;;
        esac
    done

}

dltu() {
    print_green "download all image name list and gzip file list!"
    cd $local_gzip_path || exit

    rm -f $all_image_list_txt
    rm -f $gzip_image_list_txt

    wget "$oss_prefix_url$all_image_list_txt"
    wget "$oss_prefix_url$gzip_image_list_txt"

    docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
    echo ""
    while IFS= read -r i; do
        [ -z "${i}" ] && continue
        echo "download gzip file =>: $oss_prefix_url${i}"
        if wget "$oss_prefix_url${i}" >/dev/null 2>&1; then
            echo "Gzip file download success : ${i}"
            image_full_name=$(docker load -i ${i} | head -n1 | awk -F': ' '{print $2}')

            app_name=$(echo "$image_full_name" | sed 's|.*/||g')
            echo "extract short name is $app_name"

            if echo $image_full_name | grep -q "rancher"
            then
                print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
                docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
                docker push $DockerRegisterDomain/rancher/$app_name
            else
                print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
                docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
                docker push $DockerRegisterDomain/cmii/$app_name
            fi

        else
            print_red "Gzip file download FAILED : ${i}"
        fi
        echo "-------------------------------------------------"
    done <"${gzip_image_list_txt}"

}

Load_Tag_Upload(){
    print_green "[LTU] - start to load image from offline !"

    while [[ $# -gt 0 ]]; do
        case "$1" in
        rke)
            # print_green "download rke "
            local_gzip_path="$local_gzip_path/rke"
            mkdir -p ${local_gzip_path}
            oss_prefix_url="$oss_prefix_url/rke/"
            ltu
            shift # past argument
            ;;
        middle)
            local_gzip_path="$local_gzip_path/middle"
            mkdir -p $local_gzip_path
            oss_prefix_url="$oss_prefix_url/middle/"
            ltu
            shift # past argument
            ;;
        cmii)
            local_gzip_path="$local_gzip_path/cmii"
            mkdir -p $local_gzip_path
            oss_prefix_url="$oss_prefix_url/ehejpt/"
            ltu
            shift # past argument
            ;;
        *)
            # unknown option
            print_red "bad arguments"
            ;;
        esac
    done

}

ltu(){
    all_file_list=$(find $local_gzip_path -type f -name "*.tar.gz")

    for file in $all_file_list; do
        echo "offline gzip file is => : $file"
        image_full_name=$(docker load -i ${file} | head -n1 | awk -F': ' '{print $2}')

        app_name=$(echo "$image_full_name" | sed 's|.*/||g')
        echo "extract short name is $app_name"

        if echo $image_full_name | grep -q "rancher"
        then
            print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
            docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
            docker push $DockerRegisterDomain/rancher/$app_name
        else
            print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
            docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
            docker push $DockerRegisterDomain/cmii/$app_name
        fi
    done
}


test(){
    app_name=$(echo "nginx:latest" | sed 's|.*/||g')
    echo "extract short name is $app_name"
}

# test
Download_Load_Tag_Upload "cmii"

#Load_Tag_Upload "rke"
9
998-常用脚本/镜像同步/image-clean.sh
Normal file
9
998-常用脚本/镜像同步/image-clean.sh
Normal file
@@ -0,0 +1,9 @@
#!/bin/bash

rm -rf *.tar.gz
rm -rf 2023*.txt

for item in $(mc ls demo/cmlc-installation/tmp/ | awk '{print$6}')
do
    mc rm demo/cmlc-installation/tmp/"${item}"
done
438
998-常用脚本/镜像同步/image-sync.sh
Normal file
438
998-常用脚本/镜像同步/image-sync.sh
Normal file
@@ -0,0 +1,438 @@
#!/bin/bash

# conventions / global state

NeedUploadFolder=99
IsUpload=99
UploadImageName=""
ImageListName=""
ImageTarName=""
ImageName=""
ImageTag=""
InnerIPv4CIDR=""
InnerIPv6CIDR=""
InnerIPv4=""
InnerIPv6=""
CmiiImagePrefix="harbor.cdcyy.com.cn/cmii/"
OSS_URL="https://oss.demo.uavcmlc.com/cmlc-installation/tmp/"
Custom_Client_Harbor="REPLACE:8033"
app_name=""
new_tag=""
harbor_host=""
namespace="xmyd"

#######################################
# description
# Globals:
#   ImageName
#   ImageTag
#   ImageTarName
#   RANDOM
#   UploadImageName
# Arguments:
#   None
#######################################
get_image_tar_name() {

    # harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:4.1.6
    ImageName=$(echo $UploadImageName | cut -d ":" -f1 | cut -d"/" -f3)
    ImageTag=$(echo $UploadImageName | cut -d ":" -f2)
    local currentDate=$(date +'%Y-%m-%d')
    local random_number=$((RANDOM % 900 + 100))

    ImageTarName="$ImageName=$ImageTag=$currentDate=$random_number.tar.gz"
}

#######################################
# description
# Globals:
#   ImageName
#   ImageTag
#   ImageTarName
#   UploadImageName
# Arguments:
#   None
#######################################
get_image_name_from_tar() {

    if [[ $ImageTarName == cmlc* ]]; then
        # compatible with the octopus-agent naming scheme
        # cmlc=cmii=cmii-uav-busybox=0326.tar.gz
        # rancher=rancher=cmii-uav-busybox=0326.tar.gz

        # use ${variable%.pattern} to strip the suffix

        ImageName=$(echo $ImageTarName | cut -d "=" -f3)
        local tagWithSuffix=$(echo $ImageTarName | cut -d "=" -f4)
        ImageTag="${tagWithSuffix%.tar.gz}"
        UploadImageName="$ImageName:$ImageTag"
        return
    fi

    # cmii-uav-oauth=4.1.6-0918=2023-09-18=123.tar.gz

    ImageName=$(echo $ImageTarName | cut -d "=" -f1)
    ImageTag=$(echo $ImageTarName | cut -d"=" -f2)

    UploadImageName="$ImageName:$ImageTag"
}

# Detect the server's internal NIC and IP addresses (CIDR form)
get_Internal_IP_CIDR() {

    local interface_prefix=("[[:space:]]eth[0-9]{1,2}" "[[:space:]]ens[0-9]{1,3}" "[[:space:]]eno[0-9]{1,3}" "[[:space:]]enp[0-9]{1,2}")
    local real_interface="eth90"

    for interface in "${interface_prefix[@]}"; do
        if echo $(ip link show) | grep -qE ${interface}; then
            real_interface=$(echo $(ip link show) | grep -oE ${interface} | head -1 | cut -d" " -f2)
            echo "real internal NIC of this host => [$real_interface]"
            break
        fi
    done

    # IPv4 address regex (CIDR form)
    local ipv4_regex="inet[[:space:]](25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\/[0-9]{1,2}"

    # IPv6 address regex (CIDR form)
    local ipv6_regex="inet6[[:space:]]([0-9a-fA-F]{0,4}(:[0-9a-fA-F]{0,4}){1,7})\/[0-9]{1,3}"

    # find the IPv4 address
    local inner_ipv4=$(echo $(ip addr show $real_interface) | grep -oE $ipv4_regex | cut -d" " -f2)
    InnerIPv4CIDR=$inner_ipv4
    echo "Interface: $real_interface, IPv4 Address: $inner_ipv4"

    # find the IPv6 address
    local inner_ipv6=$(echo $(ip addr show $real_interface) | grep -oE $ipv6_regex | cut -d" " -f2)
    InnerIPv6CIDR=$inner_ipv6
    echo "Interface: $real_interface, IPv6 Address: $inner_ipv6"

}

#######################################
# description
# Globals:
#   InnerIPv4
#   InnerIPv4CIDR
#   InnerIPv6
#   InnerIPv6CIDR
# Arguments:
#   None
#######################################
get_Internal_IP() {

    get_Internal_IP_CIDR

    InnerIPv4=$(echo $InnerIPv4CIDR | cut -d "/" -f1)
    InnerIPv6=$(echo $InnerIPv6CIDR | cut -d "/" -f1)

    echo "internal IPv4 address of the server is $InnerIPv4"
    echo "internal IPv6 address of the server is $InnerIPv6"

}

parse_args(){
    if [ "$1" == "" ]; then
        echo "no zip file given, error!"
        exit 233
    fi
    local image_name="$1"


    if [[ $image_name == cmlc* ]]; then
        # compatible with the octopus-agent naming scheme
        # cmlc=cmii=cmii-uav-busybox=0326.tar.gz
        # rancher=rancher=cmii-uav-busybox=0326.tar.gz
        app_name=$(echo $image_name | cut -d "=" -f3)
        local tagWithSuffix=$(echo $image_name | cut -d "=" -f4)
        new_tag="${tagWithSuffix%.tar.gz}"
        return
    fi

    # this script's own naming scheme
    # cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
    # cmii-uav-mqtthandler=5.4.0-031201=2024-03-12=138.tar.gz
    app_name=$(echo $image_name | cut -d "=" -f1)
    new_tag=$(echo $image_name | cut -d "=" -f2)
}


update_image_tag(){
    if [ "$new_tag" == "" ]; then
        echo "new tag error!"
        exit 233
    fi

    echo ""
    echo "[update_image_tag] - start to update => ${harbor_host}/cmii/$app_name:${new_tag}"
    echo ""

    local image_prefix=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)

    echo "image grep is => ${image_prefix}"

    echo "start to update ${namespace} ${app_name} to ${new_tag} !"
    echo ""
    kubectl -n ${namespace} patch deployment "${app_name}" -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${harbor_host}/cmii/$app_name:${new_tag}\"}]}}}}"
    echo ""
    echo "start to wait for 3 seconds!"
    sleep 3
    local image_new=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}')
    echo ""
    echo "new image are => $image_new"
    echo ""
}

#######################################
# description
# Globals:
#   _m_opts
# Arguments:
#   None
#######################################
_math() {
    _m_opts="$@"
    printf "%s" "$((_m_opts))"
}

# main flow: parse CLI arguments

while [[ $# -gt 0 ]]; do
    case "$1" in
    -fu | --fileupload)
        NeedUploadFolder=1
        ImageListName=${2}
        shift # past argument
        ;;
    -fd | --filedownload)
        NeedUploadFolder=2
        ImageListName=${2}
        shift # past argument
        ;;
    -u | --upload)
        IsUpload=1
        UploadImageName="${2}"
        shift # past argument
        ;;
    -d | --download)
        IsUpload=2
        ImageTarName="${2}"
        shift # past argument
        ;;
    -h | --harbor)
        Custom_Client_Harbor="${2}"
        shift # past argument
        ;;
    *)
        # unknown option
        ;;
    esac
    shift # past argument or value
done

#######################################
# description
# Globals:
#   ImageTarName
#   UploadImageName
# Arguments:
#   None
#######################################
Download_Compress_UploadOss() {

    # upload
    echo "[upload] - image to process => $UploadImageName"
    echo ""
    echo "[upload] - start pulling the image!"
    echo ""
    if docker pull "${UploadImageName}" >/dev/null 2>&1; then
        echo "pull - image pulled successfully! => $UploadImageName"
    else
        if docker inspect "${UploadImageName}" >/dev/null 2>&1; then
            echo "pull - image already exists locally => $UploadImageName"
        else
            echo ""
            echo "pull - image pull FAILED! => $UploadImageName"
            echo "pull - image pull FAILED! => $UploadImageName"
            echo ""
            return 233
        fi
    fi
    echo ""

    get_image_tar_name
    echo "[upload] - image will be compressed to => $ImageTarName"
    docker save $UploadImageName | gzip --stdout >${ImageTarName}
    echo "[upload] - compressed successfully! $(ls | grep ${ImageTarName})"
    echo ""

    echo "[upload] - start uploading to OSS!"
    mc cp ./${ImageTarName} demo/cmlc-installation/tmp/
    echo "[upload] - OSS upload success => $(mc ls demo/cmlc-installation/tmp/ | grep ${ImageTarName})"
    echo ""

}
#######################################
# description
# Globals:
#   CmiiImagePrefix
#   ImageTarName
#   InnerIPv4
#   OSS_URL
#   UploadImageName
#   custom_prefix
# Arguments:
#   None
# Returns:
#   <unknown> ...
#######################################
Download_Load_Tag_UploadHarbor_Update() {

    # for convenience
    # Check if the variable starts with "harbor"
    # if [[ $ImageTarName != harbor* ]]; then
    #     ImageTarName="$CmiiImagePrefix$ImageTarName"
    # fi

    echo "[download] - archive to process => $ImageTarName"

    echo "[download] - start downloading"
    wget "$OSS_URL$ImageTarName"
    if [[ $? -ne 0 ]]; then
        echo "[download] - image download failed! exiting!"
        return
    fi
    echo ""

    echo "[download] - start extracting the archive"
    docker load <"${ImageTarName}"
    echo ""

    get_Internal_IP
    echo "[download] - start pushing the image to the local registry => $InnerIPv4:8033"
    custom_prefix="$InnerIPv4:8033/cmii/"
    echo ""

    get_image_name_from_tar
    echo "[download] - resolved private image address => $custom_prefix$UploadImageName"
    docker tag "$CmiiImagePrefix${UploadImageName}" "$custom_prefix$UploadImageName"

    echo "[download] - logging in to the private registry"
    docker login -u admin -p V2ryStr@ngPss "$InnerIPv4:8033"
    echo ""
    echo "[download] - pushing to the private registry!"
    docker push "$custom_prefix$UploadImageName"
    echo ""
    echo ""

    echo ""
    echo "[update] - start updating the image tag!"
    harbor_host="$InnerIPv4:8033"
    parse_args $ImageTarName
    update_image_tag
}

if [[ $NeedUploadFolder == 1 ]]; then
    # uploading a whole list file (batch)
    echo "[upload] - list file to upload => ${ImageListName}"

    currentDate=$(date +'%Y-%m-%d')
    random_number=$((RANDOM % 900 + 100))
    tar_image_txt_file="$currentDate-$random_number.txt"
    echo "[upload] - temp file for archive names => $tar_image_txt_file"
    touch $tar_image_txt_file
    echo ""
    echo ""

    while IFS= read -r i; do
        [ -z "${i}" ] && continue

        UploadImageName=${i}
        Download_Compress_UploadOss || return $?

        # record the archive file name
        echo "$ImageTarName" >>"$tar_image_txt_file"

        echo "-------------------------------------------------"
    done <"${ImageListName}"

    echo ""

    echo "[upload] - uploading the archive-name list to OSS!"
    mc cp ./"$tar_image_txt_file" demo/cmlc-installation/tmp/
    echo "[upload] - OSS upload success => $(mc ls demo/cmlc-installation/tmp/ | grep "${tar_image_txt_file}")"
    echo ""

    echo "[upload] - run the following command on the target master host:"
    echo ""
    echo ""
    echo "source <(curl -sL https://b2.107421.xyz/image-sync.sh) -fd "$OSS_URL$tar_image_txt_file""
    echo ""
    echo ""

elif [[ $NeedUploadFolder == 2 ]]; then
    # file download
    echo "[download] - archive list file to download => $ImageListName"
    wget ${ImageListName}

    tar_image_txt_file=$(echo ${ImageListName} | cut -d"/" -f6)
    image_count=0

    while IFS= read -r i; do
        [ -z "${i}" ] && continue

        ImageTarName=${i}
        Download_Load_Tag_UploadHarbor_Update
        image_count="$(_math "$image_count" + 1)"


        echo "-------------------------------------------------"
    done <"${tar_image_txt_file}"
    echo ""

    echo "[download] - wait for the process to finish"
    _sleep_c="500"
    for (( i=1; i<$image_count; i++ )) ; do
        _sleep_c="$(_math "$_sleep_c" + 500)"
    done
    echo "[download] - $image_count images in total, waiting for pushes to finish => $_sleep_c seconds"
    while [ "$_sleep_c" -ge "0" ]; do
        printf "\r \r"
        printf -- "%b" "$_sleep_c"
        _sleep_c="$(_math "$_sleep_c" - 1)"
        sleep 1
    done
    printf "\r"

fi

if [[ $IsUpload == 1 ]]; then
    # upload a single image
    Download_Compress_UploadOss || return $?
    echo "[upload] - run the following command on the target master host:"
    echo ""
    echo ""
    echo "source <(curl -sL https://b2.107421.xyz/image-sync.sh) -d $ImageTarName"
    echo ""
    echo ""
    Client_Harbor_Address="$Custom_Client_Harbor/cmii/$ImageName:$ImageTag"
    echo "[upload] - manual commands below, full target image address => $Client_Harbor_Address"
    echo ""
    echo "wget $OSS_URL$ImageTarName && docker load < $ImageTarName && docker tag ${UploadImageName} $Client_Harbor_Address && docker push $Client_Harbor_Address"
    echo ""
    echo ""
elif [[ $IsUpload == 2 ]];then
    # download a single image
    Download_Load_Tag_UploadHarbor_Update

    echo "[download] - wait for the process to finish"
    _sleep_c="500"
    while [ "$_sleep_c" -ge "0" ]; do
        printf "\r \r"
        printf -- "%b" "$_sleep_c"
        _sleep_c="$(_math "$_sleep_c" - 1)"
        sleep 1
    done
    printf "\r"
fi
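# Usage sketch (flags as parsed above; file names are examples following the script's own
# name=tag=date=rand.tar.gz convention, and images.txt is a hypothetical list file):
# bash image-sync.sh -u harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:4.1.6   # online host: pull, pack, upload to OSS
# bash image-sync.sh -d cmii-uav-oauth=4.1.6=2024-03-04=573.tar.gz      # target host: download, push, patch deployment
# bash image-sync.sh -fu images.txt                                     # batch upload from a list file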
25
998-常用脚本/镜像同步/清除镜像.sh
Normal file
25
998-常用脚本/镜像同步/清除镜像.sh
Normal file
@@ -0,0 +1,25 @@
#!/bin/bash

image_name_prefix_list=(harbor.cdcyy.com.cn)


for (( i=1; i<=100; i++ ))
do
    echo $i
    for image_name_prefix in "${image_name_prefix_list[@]}"
    do
        content=$(docker image ls | grep "${image_name_prefix}" | head -n1)
        if [ ! "$content" == "" ]; then
            echo "$content"
            echo "$(echo $content | awk '{print$1}'):$(echo $content | awk '{print$2}')"
            if [ "$(echo $content | awk '{print$2}')" == "<none>" ]; then
                continue
            fi
            docker image rm "$(echo $content | awk '{print$1}'):$(echo $content | awk '{print$2}')"
        fi
    done
done

docker image prune -f
68
998-常用脚本/镜像同步/离线更新tag脚本.sh
Normal file
68
998-常用脚本/镜像同步/离线更新tag脚本.sh
Normal file
@@ -0,0 +1,68 @@
#!/bin/bash

harbor_host=10.40.51.5:8033
namespace=ynejpt
app_name=""
new_tag=""

upload_image_to_harbor(){
    if [ "$app_name" == "" ]; then
        echo "app name null exit!"
        exit 233
    fi

    if ! docker load < "$1"; then
        echo "docker load error !"
        exit 233
    fi
    docker tag "harbor.cdcyy.com.cn/cmii/$app_name:$new_tag" "$harbor_host/cmii/$app_name:$new_tag"
    echo ""
    echo ""
    echo "upload_image_to_harbor - start to push to => $harbor_host/cmii/$app_name:$new_tag"
    docker login -u admin -p V2ryStr@ngPss $harbor_host
    docker push "$harbor_host/cmii/$app_name:$new_tag"
    echo ""
    echo ""

}

parse_args(){
    if [ "$1" == "" ]; then
        echo "no zip file given, error!"
        exit 233
    fi
    local image_name="$1"

    # cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
    app_name=$(echo $image_name | cut -d "=" -f1)
    new_tag=$(echo $image_name | cut -d "=" -f2)
}

update_image_tag(){
    if [ "$new_tag" == "" ]; then
        echo "new tag error!"
        exit 233
    fi

    local image_prefix=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)

    echo "image grep is => ${image_prefix}"

    echo "start to update ${namespace} ${app_name} to ${new_tag} !"
    echo ""
    kubectl -n ${namespace} patch deployment "${app_name}" -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${harbor_host}/cmii/$app_name:${new_tag}\"}]}}}}"
    echo ""
    echo "start to wait for 3 seconds!"
    sleep 3
    local image_new=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}')
    echo ""
    echo "new image are => $image_new"
    echo ""
}

main(){
    parse_args "$1"
    upload_image_to_harbor "$1"
    update_image_tag
}

main "$@"
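# Usage sketch (the file name follows the script's own name=tag=... convention, taken from the comment above):
# bash 离线更新tag脚本.sh cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz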