Add GPU section

This commit is contained in:
zeaslity
2025-11-12 18:14:10 +08:00
parent 4b274a02c8
commit fd60868b97
15 changed files with 1220 additions and 96 deletions

.idea/workspace.xml (generated)
View File

@@ -4,92 +4,23 @@
<option name="autoReloadType" value="SELECTIVE" />
</component>
<component name="ChangeListManager">
<list default="true" id="a078e6aa-c7c7-487c-ab23-90fee7ad88b2" name="Changes" comment="修改CICD的jenkins构建脚本">
<change afterPath="$PROJECT_DIR$/58-202503-新DEMO环境/1-磁盘挂载.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/0-批量脚本.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/ai-config.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/cluster.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/0-节点lable.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/doris-all-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/doris-be-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/doris-be-statefulset.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/doris-be-statusfulset-localpv-failed.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/doris-fe-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/doris-fe-statusfulset.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/doris-local-pv.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/install_docker_offline.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/sshd_config" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/分块压缩合.md" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/压缩文件包.txt" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/已有部署备份/all-statefull_sets-zjyd.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/已有部署备份/install_auth.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/已有部署备份/nginx-web.conf" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/已有部署备份/nginx-端口转发.conf" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/已有部署备份/主机授权文件.json" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/已有部署备份/授权码.json" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/部署文件/k8s-backend.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/部署文件/k8s-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/部署文件/k8s-frontend.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/部署文件/k8s-ingress.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/部署文件/k8s-nfs-test.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/部署文件/k8s-nfs.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/部署文件/k8s-pvc.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/0-批量脚本.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/cluster.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/cmii-update.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-backend.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-dashboard.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-emqx.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-frontend.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-ingress.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-mongo.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-mysql.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-nacos.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-nfs-test.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-nfs.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-pvc.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-rabbitmq.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-redis.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-srs.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/disk.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-be-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-be-internal-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-be-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-be-statusfulset.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-fe-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-fe-internal-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-fe-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-fe-statusfulset.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-pvc.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/修改pvc-然后statefulset中的image.txt" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/helm-minio.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/x_minio初始化.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/重要备份.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/b-联网-docker安装.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/x_minio初始化.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/磁盘脚本/1-高级-磁盘挂载.sh" afterDir="false" />
<list default="true" id="a078e6aa-c7c7-487c-ab23-90fee7ad88b2" name="Changes" comment="新增雄安空能院项目">
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-pyfusion-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/关停计划/offline.conf" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/关停计划/关停恢复.md" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/关停计划/关停脚本.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/关停计划/启动脚本.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/关停计划/备份/real-nginx-proxy.conf" afterDir="false" />
<change afterPath="$PROJECT_DIR$/68-202511-k8s升级1-30-14版本/kubernetes-images-1.30.14.txt" afterDir="false" />
<change afterPath="$PROJECT_DIR$/68-202511-k8s升级1-30-14版本/rke-1.30.14-cluster-official.yml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/68-202511-k8s升级1-30-14版本/rke-13014-cluster-security.yml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/rke-13014-cluster-security.yml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/12-连云港公安-5.1.0/2-helm-chart/8-gdr-server.sh" beforeDir="false" afterPath="$PROJECT_DIR$/12-连云港公安-5.1.0/2-helm-chart/8-gdr-server.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/30-刘喜通感-第三次部署/磁盘格式化.sh" beforeDir="false" afterPath="$PROJECT_DIR$/30-刘喜通感-第三次部署/磁盘格式化.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-deploy/doris-be-statusfulset.yaml" beforeDir="false" afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-deploy/doris-be-statusfulset.yaml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/65-202504-湖南二级监管/doris部署-operator/doris-deplyment.yaml" beforeDir="false" afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris部署-operator/doris-deplyment.yaml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/啊-批量命令.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/啊-批量命令.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/deploy-nfs-server.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/deploy-nfs-server.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/真实nginx-reverse-proxy.conf" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/真实nginx-reverse-proxy.conf" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/设置ingress-nginx.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/设置ingress-nginx.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/z_执行apply命令.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/z_执行apply命令.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/手动创建harbor仓库.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/手动创建harbor仓库.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/41-202410-珠海横琴/0-dependencies.sh" beforeDir="false" afterPath="$PROJECT_DIR$/41-202410-珠海横琴/0-dependencies.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/67-202508-雄安空能院/重要备份.sh" beforeDir="false" afterPath="$PROJECT_DIR$/67-202508-雄安空能院/重要备份.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/编辑calico状态.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/编辑calico状态.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/ImageSyncDLTU.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/ImageSyncDLTU.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/故障恢复脚本/重启cmii的前端后端Pod.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/故障恢复脚本/重启cmii的前端后端Pod.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/更新脚本/一键更新Tag脚本.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/更新脚本/一键更新Tag脚本.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/磁盘脚本/0-挂载磁盘.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/磁盘脚本/0-挂载磁盘.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/重启服务器恢复/1-重启脚本.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/重启服务器恢复/1-重启脚本.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/重启服务器恢复/1.1-minio-重启脚本.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/重启服务器恢复/1.1-minio-重启脚本.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/999-数据库脚本/z_database_execute.sh" beforeDir="false" afterPath="$PROJECT_DIR$/999-数据库脚本/z_database_execute.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/999-部署模板/kubectl" beforeDir="false" />
<change beforePath="$PROJECT_DIR$/999-部署模板/rke" beforeDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/备份脚本/关停全部的服务.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/备份脚本/关停全部的服务.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/备份脚本/备份命名空间.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/备份脚本/备份命名空间.sh" afterDir="false" />
</list>
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
@@ -125,7 +56,7 @@
&quot;SHARE_PROJECT_CONFIGURATION_FILES&quot;: &quot;true&quot;,
&quot;git-widget-placeholder&quot;: &quot;main&quot;,
&quot;go.import.settings.migrated&quot;: &quot;true&quot;,
&quot;last_opened_file_path&quot;: &quot;C:/Users/wddsh/Documents/IdeaProjects/CmiiDeploy/67-202508-雄安空能院&quot;,
&quot;last_opened_file_path&quot;: &quot;C:/Users/wddsh/Documents/IdeaProjects/CmiiDeploy/69-202511-AI-GPU测试&quot;,
&quot;node.js.detected.package.eslint&quot;: &quot;true&quot;,
&quot;node.js.detected.package.tslint&quot;: &quot;true&quot;,
&quot;node.js.selected.package.eslint&quot;: &quot;(autodetect)&quot;,
@@ -137,13 +68,14 @@
}</component>
<component name="RecentsManager">
<key name="CopyFile.RECENT_KEYS">
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\69-202511-AI-GPU测试" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\68-202511-k8s升级1-30-14版本" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\67-202508-雄安空能院" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\67-202508-雄安空能院\deploy" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\67-202508-雄安空能院\doris-deploy" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\66-202505-浙江二级监管\已有部署备份" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\66-202505-浙江二级监管\部署文件" />
</key>
<key name="MoveFile.RECENT_KEYS">
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\67-202508-雄安空能院\关停计划\备份" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\999-部署模板" />
@@ -152,7 +84,7 @@
<component name="SharedIndexes">
<attachedChunks>
<set>
<option value="bundled-js-predefined-d6986cc7102b-e03c56caf84a-JavaScript-IU-252.23892.409" />
<option value="bundled-js-predefined-d6986cc7102b-3aa1da707db6-JavaScript-IU-252.27397.103" />
</set>
</attachedChunks>
</component>
@@ -199,7 +131,23 @@
<workItem from="1754546282094" duration="6319000" />
<workItem from="1754616863007" duration="8752000" />
<workItem from="1754744988183" duration="10000" />
<workItem from="1754963140025" duration="708000" />
<workItem from="1754963140025" duration="2134000" />
<workItem from="1755951885649" duration="438000" />
<workItem from="1756714518451" duration="696000" />
<workItem from="1756717041659" duration="90000" />
<workItem from="1756796460416" duration="1770000" />
<workItem from="1758010205322" duration="667000" />
<workItem from="1758173885896" duration="3566000" />
<workItem from="1758507606314" duration="1297000" />
<workItem from="1758683301121" duration="396000" />
<workItem from="1760519135681" duration="5000" />
<workItem from="1760940560020" duration="4889000" />
<workItem from="1762223020221" duration="1036000" />
<workItem from="1762323446517" duration="621000" />
<workItem from="1762329425217" duration="5788000" />
<workItem from="1762760898943" duration="4498000" />
<workItem from="1762849000043" duration="5966000" />
<workItem from="1762928252671" duration="4692000" />
</task>
<task id="LOCAL-00001" summary="common update">
<option name="closed" value="true" />
@@ -217,12 +165,23 @@
<option name="project" value="LOCAL" />
<updated>1744874102820</updated>
</task>
<option name="localTasksCounter" value="3" />
<task id="LOCAL-00003" summary="新增雄安空能院项目">
<option name="closed" value="true" />
<created>1754963979625</created>
<option name="number" value="00003" />
<option name="presentableId" value="LOCAL-00003" />
<option name="project" value="LOCAL" />
<updated>1754963979625</updated>
</task>
<option name="localTasksCounter" value="4" />
<servers />
</component>
<component name="TypeScriptGeneratedFilesManager">
<option name="version" value="3" />
</component>
<component name="UnknownFeatures">
<option featureType="com.intellij.fileTypeFactory" implementationName="*.conf" />
</component>
<component name="Vcs.Log.Tabs.Properties">
<option name="TAB_STATES">
<map>
@@ -237,7 +196,8 @@
<component name="VcsManagerConfiguration">
<MESSAGE value="common update" />
<MESSAGE value="修改CICD的jenkins构建脚本" />
<option name="LAST_COMMIT_MESSAGE" value="修改CICD的jenkins构建脚本" />
<MESSAGE value="新增雄安空能院项目" />
<option name="LAST_COMMIT_MESSAGE" value="新增雄安空能院项目" />
</component>
<component name="VgoProject">
<settings-migrated>true</settings-migrated>

View File

@@ -0,0 +1,16 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: pyfusion-configmap
  namespace: xakny
data:
  config.yaml: |-
    mqtt:
      broker: "helm-emqxs"
      port: 1883
      username: "cmii"
      password: "odD8#Ve7.B"
      topics:
        mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
        sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
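A quick way to sanity-check these MQTT settings (a sketch: it assumes the eclipse-mosquitto image is pullable, and uses a throwaway pod because the broker name helm-emqxs only resolves inside the cluster):

kubectl -n xakny run mqtt-check --rm -it --restart=Never --image=eclipse-mosquitto -- \
  mosquitto_sub -h helm-emqxs -p 1883 -u cmii -P 'odD8#Ve7.B' \
  -t 'bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+' -v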

View File

@@ -0,0 +1,9 @@
server {
    listen 8088;
    server_name localhost;
    charset utf-8;

    location / {
        add_header Content-Type 'text/html; charset=utf-8';
        # Body text: "The platform's trial period has expired; please contact the system administrator"
        return 200 "平台已过试用期,请联系系统管理员";
    }
}
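With this file in place and nginx reloaded, the maintenance page can be checked from the host (port 8088 per the listen directive above):

curl -s http://localhost:8088/
# Expected body: 平台已过试用期,请联系系统管理员 (the trial-period-expired notice)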

View File

@@ -0,0 +1,35 @@
# Shutdown
## Stop Nginx
1. Move /etc/nginx/conf.d/real-proxy.conf to /etc/nginx/conf.d/real-proxy.conf_back
2. Move /etc/nginx/conf.d/offline.conf_back to /etc/nginx/conf.d/offline.conf
3. Restart nginx: systemctl restart nginx

The move steps are plain mv calls; a combined sketch follows this plan.
## Stop Harbor
docker-compose -f /root/wdd/harbor/docker-compose.yml down -v
## Remove the ingress exposure
kubectl delete -f /root/wdd/install/k8s-ingress-nginx.yaml
## Run the shutdown script
bash /root/wdd/ccc.sh
# Recovery
## Restore nginx
1. Move /etc/nginx/conf.d/real-proxy.conf_back to /etc/nginx/conf.d/real-proxy.conf
2. Move /etc/nginx/conf.d/offline.conf to /etc/nginx/conf.d/offline.conf_back
3. Restart nginx: systemctl restart nginx
## Start Harbor
docker-compose -f /root/wdd/harbor/docker-compose.yml up -d
Wait 30 seconds.
## Re-enable the ingress exposure
kubectl apply -f /root/wdd/install/k8s-ingress-nginx.yaml
## Restore the business workloads
kubectl apply -f /root/wdd/all-deployment-xakny.yaml
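The "Move A to B" steps above are plain mv calls. A minimal sketch for the shutdown direction that also validates the nginx configuration before restarting (nginx -t is standard; the paths are the ones listed in the plan):

mv /etc/nginx/conf.d/real-proxy.conf /etc/nginx/conf.d/real-proxy.conf_back
mv /etc/nginx/conf.d/offline.conf_back /etc/nginx/conf.d/offline.conf
nginx -t && systemctl restart nginx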

View File

@@ -0,0 +1,167 @@
#!/bin/bash
#
# @author Assistant
# @version 1.0
# @license MIT
#
# Script to manage nginx configuration changes, stop harbor, remove the
# ingress exposure, and run a custom script.
# Logs all operations to /root/wdd/ccc.log with detailed levels.
#
# Dependencies:
# - Required commands: mv, systemctl, docker-compose, kubectl, bash
# - File paths: /etc/nginx/conf.d/, /root/wdd/harbor/docker-compose.yml, /root/wdd/ccc.sh
# - Permissions: root access for file operations and systemctl
#

# Global constants
readonly LOG_FILE="/root/wdd/ccc.log"

# Error handling: exit on error, unset variable, or pipe failure; log on abnormal exit
set -euo pipefail
trap 'error_handler' EXIT

###
# Logs a message with timestamp and level to both the log file and stderr.
# @param level string The log level (DEBUG, INFO, WARN, ERROR)
# @param message string The message to log
# @return void
# @require date, tee commands
###
log() {
    local level="$1"
    local message="$2"
    local timestamp
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo "[$timestamp] [$level] $message" | tee -a "$LOG_FILE" >&2
}

###
# Exit handler: logs an error only when the script terminates abnormally
# (otherwise a clean run would end with a spurious ERROR line for exit code 0).
# @return void
###
error_handler() {
    local exit_code=$?
    if [[ ${exit_code} -ne 0 ]]; then
        log ERROR "Script terminated with exit code $exit_code"
    fi
    exit $exit_code
}

###
# Moves a file from source to destination with logging.
# @param source_path string Path to the source file
# @param dest_path string Path to the destination file
# @return 0 on success, non-zero on failure
# @require mv command
###
move_file() {
    local source_path="$1"
    local dest_path="$2"
    log INFO "Moving file: $source_path to $dest_path"
    # > Check if source exists before moving
    if [[ ! -f "$source_path" ]]; then
        log WARN "Source file $source_path does not exist, skipping move"
        return 1
    fi
    mv "$source_path" "$dest_path"
    log INFO "File moved successfully: $source_path to $dest_path"
    return 0
}

###
# Restarts the nginx service with systemctl.
# @return 0 on success, non-zero on failure
# @require systemctl command, nginx service
###
restart_nginx() {
    log INFO "Restarting nginx service"
    systemctl restart nginx
    log INFO "Nginx restarted successfully"
    return 0
}

###
# Removes the ingress exposure by deleting the ingress-nginx manifests.
# @return 0 on success, non-zero on failure
# @require kubectl command
###
stop_ingress_expose() {
    log INFO "Removing ingress exposure"
    kubectl delete -f /root/wdd/install/k8s-ingress-nginx.yaml
    log INFO "Ingress exposure removed successfully"
    return 0
}

###
# Stops the harbor stack using docker-compose down.
# @return 0 on success, non-zero on failure
# @require docker-compose command, /root/wdd/harbor/docker-compose.yml
###
stop_harbor() {
    local compose_file="/root/wdd/harbor/docker-compose.yml"
    log INFO "Stopping harbor stack with docker-compose"
    # > Check if docker-compose file exists
    if [[ ! -f "$compose_file" ]]; then
        log ERROR "Docker-compose file $compose_file not found"
        return 1
    fi
    docker-compose -f "$compose_file" down -v
    log INFO "Harbor stack stopped successfully"
    return 0
}

###
# Executes a bash script with logging.
# @param script_path string Path to the script to execute
# @return Exit code of the executed script
# @require bash command
###
run_script() {
    local script_path="$1"
    log INFO "Executing script: $script_path"
    # > Check if script exists and is executable
    if [[ ! -f "$script_path" ]]; then
        log ERROR "Script $script_path not found"
        return 1
    fi
    if [[ ! -x "$script_path" ]]; then
        log WARN "Script $script_path is not executable, running it with bash"
    fi
    # Capture the exit code without tripping `set -e`
    local script_exit_code=0
    bash "$script_path" || script_exit_code=$?
    log INFO "Script executed with exit code: $script_exit_code"
    return $script_exit_code
}

###
# Main function orchestrating the entire process.
# @return void
###
main() {
    log INFO "Starting main script execution"
    # Swap in the offline nginx configuration; a missing source file is
    # only a warning, so `|| true` keeps `set -e` from aborting here
    move_file "/etc/nginx/conf.d/real-proxy.conf" "/etc/nginx/conf.d/real-proxy.conf_back" || true
    move_file "/etc/nginx/conf.d/offline.conf_back" "/etc/nginx/conf.d/offline.conf" || true
    # Restart nginx service
    restart_nginx
    # Stop harbor stack
    stop_harbor
    # Remove the ingress exposure, matching the shutdown plan above
    stop_ingress_expose
    # Execute the custom script
    run_script "/root/wdd/ccc.sh"
    log INFO "Main script completed successfully"
}

# Function Call Graph:
# main
#   -> move_file (for real-proxy.conf)
#   -> move_file (for offline.conf_back)
#   -> restart_nginx
#   -> stop_harbor
#   -> stop_ingress_expose
#   -> run_script

# Execute main function
main "$@"

View File

@@ -0,0 +1,186 @@
#!/bin/bash
#
# Nginx and business-environment recovery script
# Author: AI Assistant
# Version: 1.0.0
# License: MIT
#

# Global constants
readonly LOG_FILE="/root/wdd/ccc.log"
readonly NGINX_CONF_BACKUP="/etc/nginx/conf.d/real-proxy.conf_back"
readonly NGINX_CONF_LIVE="/etc/nginx/conf.d/real-proxy.conf"
readonly OFFLINE_CONF_LIVE="/etc/nginx/conf.d/offline.conf"
readonly OFFLINE_CONF_BACKUP="/etc/nginx/conf.d/offline.conf_back"
readonly HARBOR_COMPOSE_FILE="/root/wdd/harbor/docker-compose.yml"
readonly K8S_INGRESS_FILE="/root/wdd/install/k8s-ingress-nginx.yaml"
readonly K8S_DEPLOYMENT_FILE="/root/wdd/all-deployment-xakny.yaml"

# Commands this script depends on
readonly REQUIRED_COMMANDS=("systemctl" "docker-compose" "kubectl" "mv" "sleep")

# Initialize the execution environment
set -euo pipefail
trap 'log ERROR "Script interrupted"; exit 130' INT TERM

###
# Leveled logging
# @param level string Log level (DEBUG/INFO/WARN/ERROR)
# @param message string Message content
# @return none
###
log() {
    local level="$1"
    local message="$2"
    local timestamp
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    local log_entry="[${timestamp}] ${level}: ${message}"
    # Write to both the console and the log file
    echo "${log_entry}" | tee -a "${LOG_FILE}" >&2
}

###
# Check that all required commands are available
# @return 0 if all checks pass, 1 if a dependency is missing
###
check_dependencies() {
    for cmd in "${REQUIRED_COMMANDS[@]}"; do
        if ! command -v "${cmd}" &> /dev/null; then
            log ERROR "Missing required command: ${cmd}"
            return 1
        fi
    done
    log DEBUG "All dependency checks passed"
    return 0
}

###
# Restore the Nginx configuration files and restart the service
# @return 0 on success, 1 on failure
# @require systemctl, mv
###
restore_nginx() {
    log INFO "Restoring Nginx configuration"
    # > Restore the live proxy configuration
    if ! mv "${NGINX_CONF_BACKUP}" "${NGINX_CONF_LIVE}"; then
        log ERROR "Failed to restore real-proxy.conf"
        return 1
    fi
    log DEBUG "real-proxy.conf restored"
    # > Move the offline page back to its backup name
    if ! mv "${OFFLINE_CONF_LIVE}" "${OFFLINE_CONF_BACKUP}"; then
        log WARN "Failed to back up offline.conf; the file may not exist"
    else
        log DEBUG "offline.conf backed up"
    fi
    # > Restart the Nginx service
    if ! systemctl restart nginx; then
        log ERROR "Failed to restart the Nginx service"
        return 1
    fi
    log INFO "Nginx restarted successfully"
    return 0
}

###
# Start the Harbor container stack
# @return 0 on success, 1 on failure
# @require docker-compose, sleep
###
start_harbor() {
    log INFO "Starting Harbor"
    if ! docker-compose -f "${HARBOR_COMPOSE_FILE}" up -d; then
        log ERROR "Harbor failed to start"
        return 1
    fi
    log INFO "Harbor started; waiting 30 seconds for initialization..."
    sleep 30
    log DEBUG "Harbor initialization wait finished"
    return 0
}

###
# Re-enable the K8S ingress exposure
# @return 0 on success, 1 on failure
# @require kubectl
###
enable_ingress() {
    log INFO "Applying the K8S ingress configuration"
    if ! kubectl apply -f "${K8S_INGRESS_FILE}"; then
        log ERROR "Failed to apply the ingress configuration"
        return 1
    fi
    log INFO "Ingress configuration applied successfully"
    return 0
}

###
# Restore the business Deployments
# @return 0 on success, 1 on failure
# @require kubectl
###
restore_business() {
    log INFO "Restoring business Deployments"
    if ! kubectl apply -f "${K8S_DEPLOYMENT_FILE}"; then
        log ERROR "Failed to apply the business Deployments"
        return 1
    fi
    log INFO "Business Deployments restored successfully"
    return 0
}

###
# Main entry point
# @return 0 if every step succeeds, 1 if any step fails
###
main() {
    log INFO "===== Starting the environment recovery script ====="
    # > Check command dependencies first
    if ! check_dependencies; then
        log ERROR "Dependency check failed; aborting"
        return 1
    fi
    # Steps are executed in this order
    local steps=(
        restore_nginx
        start_harbor
        enable_ingress
        restore_business
    )
    for step in "${steps[@]}"; do
        if ! ${step}; then
            log ERROR "Step ${step} failed"
            return 1
        fi
    done
    log INFO "===== Environment recovery script finished ====="
    return 0
}

# Run main and propagate its exit status
if main; then
    exit 0
else
    exit 1
fi
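Once the recovery script finishes, a check like the following confirms that the business workloads actually came back (a sketch; the xakny namespace is inferred from the all-deployment-xakny.yaml filename):

kubectl get deploy -n xakny -o name \
  | xargs -I{} kubectl rollout status {} -n xakny --timeout=300s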

View File

@@ -0,0 +1,50 @@
upstream proxy_server {
    ip_hash;
    server 192.168.0.3:30500;
    server 192.168.0.4:30500;
    server 192.168.0.5:30500;
}
server {
    listen 8088;
    server_name localhost;

    location / {
        proxy_pass http://proxy_server;
        client_max_body_size 5120m;
        client_body_buffer_size 5120m;
        client_body_timeout 6000s;
        proxy_send_timeout 10000s;
        proxy_read_timeout 10000s;
        proxy_connect_timeout 600s;
        proxy_max_temp_file_size 5120m;
        proxy_request_buffering on;
        proxy_buffering off;
        proxy_buffer_size 4k;
        proxy_buffers 4 12k;
        proxy_set_header Host fake-domain.xakny.io;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
    location /_AMapService/v4/map/styles {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://webapi.amap.com/v4/ap/styles;
    }
    location /_AMapService/ {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://restapi.amap.com/;
    }
    location /rtc/v1/ {
        add_header Access-Control-Allow-Headers X-Requested-With;
        add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
        proxy_pass http://127.0.0.1:30985/rtc/v1/;
    }
    location ~ ^/\w*/actuator/ {
        return 403;
    }
}

View File

@@ -1,3 +1,7 @@
%}3}vbJXWv
192.168.0.2
SuperCyy.123

View File

@@ -0,0 +1,22 @@
rancher/mirrored-coreos-etcd:v3.5.12
rancher/rke-tools:v0.1.114
rancher/mirrored-k8s-dns-kube-dns:1.23.0
rancher/mirrored-k8s-dns-dnsmasq-nanny:1.23.0
rancher/mirrored-k8s-dns-sidecar:1.23.0
rancher/mirrored-cluster-proportional-autoscaler:v1.9.0
rancher/mirrored-coredns-coredns:1.11.1
rancher/mirrored-cluster-proportional-autoscaler:v1.9.0
rancher/mirrored-k8s-dns-node-cache:1.23.0
rancher/hyperkube:v1.30.14-rancher1
rancher/mirrored-flannel-flannel:v0.25.1
rancher/flannel-cni:v1.4.1-rancher1
rancher/mirrored-calico-node:v3.28.1
rancher/calico-cni:v3.28.1-rancher1
rancher/mirrored-calico-kube-controllers:v3.28.1
rancher/mirrored-calico-ctl:v3.28.1
rancher/mirrored-calico-pod2daemon-flexvol:v3.28.1
rancher/mirrored-pause:3.7
rancher/nginx-ingress-controller:nginx-1.11.5-rancher1
rancher/mirrored-nginx-ingress-controller-defaultbackend:1.5-rancher2
rancher/mirrored-ingress-nginx-kube-webhook-certgen:v1.5.2
rancher/mirrored-metrics-server:v0.7.1
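On a machine with registry access, this image list can be pulled and bundled for the air-gapped upgrade (a sketch; the filename matches kubernetes-images-1.30.14.txt from the change list above):

while read -r img; do
  docker pull "$img"
done < kubernetes-images-1.30.14.txt
docker save $(cat kubernetes-images-1.30.14.txt) | gzip > rke-1.30.14-images.tar.gz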

View File

@@ -0,0 +1,217 @@
nodes:
- address: ""
  port: "22"
  internal_address: ""
  role:
  - controlplane
  hostname_override: ""
  user: ubuntu
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints: []
services:
  etcd:
    image: ""
    extra_args: {}
    extra_args_array: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_args_array: {}
    win_extra_binds: []
    win_extra_env: []
    external_urls: []
    ca_cert: ""
    cert: ""
    key: ""
    path: ""
    uid: 0
    gid: 0
    snapshot: null
    retention: ""
    creation: ""
    backup_config: null
  kube-api:
    image: ""
    extra_args: {}
    extra_args_array: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_args_array: {}
    win_extra_binds: []
    win_extra_env: []
    service_cluster_ip_range: 10.43.0.0/16
    service_node_port_range: ""
    pod_security_configuration: ""
    always_pull_images: false
    secrets_encryption_config: null
    audit_log: null
    admission_configuration: null
    event_rate_limit: null
  kube-controller:
    image: ""
    extra_args: {}
    extra_args_array: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_args_array: {}
    win_extra_binds: []
    win_extra_env: []
    cluster_cidr: 10.42.0.0/16
    service_cluster_ip_range: 10.43.0.0/16
  scheduler:
    image: ""
    extra_args: {}
    extra_args_array: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_args_array: {}
    win_extra_binds: []
    win_extra_env: []
  kubelet:
    image: ""
    extra_args: {}
    extra_args_array: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_args_array: {}
    win_extra_binds: []
    win_extra_env: []
    cluster_domain: cluster.local
    infra_container_image: ""
    cluster_dns_server: 10.43.0.10
    fail_swap_on: false
    generate_serving_certificate: false
  kubeproxy:
    image: ""
    extra_args: {}
    extra_args_array: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_args_array: {}
    win_extra_binds: []
    win_extra_env: []
network:
  plugin: canal
  options: {}
  mtu: 0
  node_selector: {}
  update_strategy: null
  tolerations: []
  enable_br_netfilter: null
authentication:
  strategy: x509
  sans: []
  webhook: null
addons: ""
addons_include: []
system_images:
  etcd: rancher/mirrored-coreos-etcd:v3.5.12
  alpine: rancher/rke-tools:v0.1.114
  nginx_proxy: rancher/rke-tools:v0.1.114
  cert_downloader: rancher/rke-tools:v0.1.114
  kubernetes_services_sidecar: rancher/rke-tools:v0.1.114
  kubedns: rancher/mirrored-k8s-dns-kube-dns:1.23.0
  dnsmasq: rancher/mirrored-k8s-dns-dnsmasq-nanny:1.23.0
  kubedns_sidecar: rancher/mirrored-k8s-dns-sidecar:1.23.0
  kubedns_autoscaler: rancher/mirrored-cluster-proportional-autoscaler:v1.9.0
  coredns: rancher/mirrored-coredns-coredns:1.11.1
  coredns_autoscaler: rancher/mirrored-cluster-proportional-autoscaler:v1.9.0
  nodelocal: rancher/mirrored-k8s-dns-node-cache:1.23.0
  kubernetes: rancher/hyperkube:v1.30.14-rancher1
  flannel: rancher/mirrored-flannel-flannel:v0.25.1
  flannel_cni: rancher/flannel-cni:v1.4.1-rancher1
  calico_node: rancher/mirrored-calico-node:v3.28.1
  calico_cni: rancher/calico-cni:v3.28.1-rancher1
  calico_controllers: rancher/mirrored-calico-kube-controllers:v3.28.1
  calico_ctl: rancher/mirrored-calico-ctl:v3.28.1
  calico_flexvol: rancher/mirrored-calico-pod2daemon-flexvol:v3.28.1
  canal_node: rancher/mirrored-calico-node:v3.28.1
  canal_cni: rancher/calico-cni:v3.28.1-rancher1
  canal_controllers: rancher/mirrored-calico-kube-controllers:v3.28.1
  canal_flannel: rancher/mirrored-flannel-flannel:v0.25.1
  canal_flexvol: rancher/mirrored-calico-pod2daemon-flexvol:v3.28.1
  weave_node: ""
  weave_cni: ""
  pod_infra_container: rancher/mirrored-pause:3.7
  ingress: rancher/nginx-ingress-controller:nginx-1.11.5-rancher1
  ingress_backend: rancher/mirrored-nginx-ingress-controller-defaultbackend:1.5-rancher2
  ingress_webhook: rancher/mirrored-ingress-nginx-kube-webhook-certgen:v1.5.2
  metrics_server: rancher/mirrored-metrics-server:v0.7.1
  windows_pod_infra_container: rancher/mirrored-pause:3.7
  aci_cni_deploy_container: noiro/cnideploy:6.1.1.4.81c2369
  aci_host_container: noiro/aci-containers-host:6.1.1.4.81c2369
  aci_opflex_container: noiro/opflex:6.1.1.4.81c2369
  aci_mcast_container: noiro/opflex:6.1.1.4.81c2369
  aci_ovs_container: noiro/openvswitch:6.1.1.4.81c2369
  aci_controller_container: noiro/aci-containers-controller:6.1.1.4.81c2369
  aci_gbp_server_container: ""
  aci_opflex_server_container: ""
ssh_key_path: ~/.ssh/id_rsa
ssh_cert_path: ""
ssh_agent_auth: false
authorization:
  mode: rbac
  options: {}
ignore_docker_version: null
enable_cri_dockerd: null
kubernetes_version: ""
private_registries: []
ingress:
  provider: ""
  options: {}
  node_selector: {}
  extra_args: {}
  dns_policy: ""
  extra_envs: []
  extra_volumes: []
  extra_volume_mounts: []
  update_strategy: null
  http_port: 0
  https_port: 0
  network_mode: ""
  tolerations: []
  default_backend: null
  default_http_backend_priority_class_name: ""
  nginx_ingress_controller_priority_class_name: ""
  default_ingress_class: null
cluster_name: ""
cloud_provider:
  name: ""
prefix_path: ""
win_prefix_path: ""
addon_job_timeout: 0
bastion_host:
  address: ""
  port: ""
  user: ""
  ssh_key: ""
  ssh_key_path: ""
  ssh_cert: ""
  ssh_cert_path: ""
  ignore_proxy_env_vars: false
monitoring:
  provider: ""
  options: {}
  node_selector: {}
  update_strategy: null
  replicas: null
  tolerations: []
  metrics_server_priority_class_name: ""
restore:
  restore: false
  snapshot_name: ""
rotate_encryption_key: false
dns: null
cri_dockerd_stream_server_address: ""
cri_dockerd_stream_server_port: ""
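This file mirrors RKE's full default cluster.yml and serves as a reference for the trimmed configs below. Checking which Kubernetes versions a given rke binary supports, and applying a filled-in config, use the standard rke subcommands:

rke config --list-version --all
rke up --config rke-1.30.14-cluster-official.yml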

View File

@@ -0,0 +1,224 @@
nodes:
- address: 192.168.0.8
  user: rke-installer
  role:
  - controlplane
  - etcd
  - worker
  internal_address: 192.168.0.8
  hostname_override: "master-192.168.0.8"
  labels:
    ingress-deploy: true
    uavcloud.env: demo
- address: 192.168.0.65
  user: rke-installer
  role:
  - worker
  internal_address: 192.168.0.65
  labels:
    uavcloud.env: demo
authentication:
  strategy: x509
  sans:
  - "192.168.0.8"
private_registries:
- url: 192.168.0.8:8033 # private registry address
  user: admin
  password: "V2ryStr@ngPss"
  is_default: true
##############################################################################
# Defaults to false; if set to true, RKE will not error out when it finds an unsupported Docker version
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.30.14-rancher1-1
ssh_key_path: /root/.ssh/id_ed25519
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
  etcd:
    backup_config:
      enabled: false
      interval_hours: 72
      retention: 3
      safe_timestamp: false
      timeout: 300
    creation: 12h
    extra_args:
      election-timeout: 5000
      heartbeat-interval: 500
      cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
    gid: 0
    retention: 72h
    snapshot: false
    uid: 0
  kube-api:
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-controller
    service_cluster_ip_range: 10.74.0.0/16
    # Expose a different port range for NodePort services
    service_node_port_range: 30000-40000
    always_pull_images: true
    pod_security_policy: false
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Enable audit log to stdout
      audit-log-path: "-"
      # Increase number of delete workers
      delete-collection-workers: 3
      # Set the level of log output to warning-level
      v: 1
  kube-controller:
    # CIDR pool used to assign IP addresses to pods in the cluster
    cluster_cidr: 10.100.0.0/16
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-api
    service_cluster_ip_range: 10.74.0.0/16
    # Add additional arguments to the kubernetes controller manager
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Set the level of log output to debug-level
      v: 1
      # Enable RotateKubeletServerCertificate feature gate
      feature-gates: RotateKubeletServerCertificate=true
      # Enable TLS Certificates management
      # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
      cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
      cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
  kubelet:
    # Base domain for the cluster
    cluster_domain: cluster.local
    # IP address for the DNS service endpoint
    cluster_dns_server: 10.74.0.10
    # Fail if swap is on
    fail_swap_on: false
    # Optionally define additional volume binds to a service
    extra_binds:
    - "/data/minio-pv:/hostStorage" # do not modify; added for the MinIO PV
    # Raise max pods from the default 110 to 122
    extra_args:
      max-pods: 122
  scheduler:
    extra_args:
      # Set the level of log output to warning-level
      v: 0
      tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
  kubeproxy:
    extra_args:
      # Set the level of log output to warning-level
      v: 1
authorization:
  mode: rbac
addon_job_timeout: 30
# Specify network plug-in (canal, calico, flannel, weave, or none)
network:
  mtu: 1440
  options:
    flannel_backend_type: vxlan
  plugin: calico
  tolerations:
  - key: "node.kubernetes.io/unreachable"
    operator: "Exists"
    effect: "NoExecute"
    tolerationseconds: 300
  - key: "node.kubernetes.io/not-ready"
    operator: "Exists"
    effect: "NoExecute"
    tolerationseconds: 300
# Specify DNS provider (coredns or kube-dns)
dns:
  provider: coredns
  nodelocal: {}
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 20%
      maxSurge: 15%
  linear_autoscaler_params:
    cores_per_replica: 0.34
    nodes_per_replica: 4
    prevent_single_point_failure: true
    min: 2
    max: 3
  tolerations:
  - key: "node.kubernetes.io/unreachable"
    operator: "Exists"
    effect: "NoExecute"
    tolerationseconds: 300
  - key: "node.kubernetes.io/not-ready"
    operator: "Exists"
    effect: "NoExecute"
    tolerationseconds: 300
# Specify monitoring provider (metrics-server)
monitoring:
  provider: metrics-server
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 8
ingress:
  provider: nginx
  default_backend: true
  http_port: 30500
  https_port: 31500
  extra_envs:
  - name: TZ
    value: Asia/Shanghai
  node_selector:
    ingress-deploy: true
  options:
    use-forwarded-headers: "true"
    access-log-path: /var/log/nginx/access.log
    # client-body-timeout: '6000'
    # compute-full-forwarded-for: 'true'
    # enable-underscores-in-headers: 'true'
    # log-format-escape-json: 'true'
    # log-format-upstream: >-
    #   { "msec": "$msec", "connection": "$connection", "connection_requests":
    #   "$connection_requests", "pid": "$pid", "request_id": "$request_id",
    #   "request_length": "$request_length", "remote_addr": "$remote_addr",
    #   "remote_user": "$remote_user", "remote_port": "$remote_port",
    #   "http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
    #   "$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
    #   "request_uri": "$request_uri", "args": "$args", "status": "$status",
    #   "body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
    #   "http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
    #   "http_host": "$http_host", "server_name": "$server_name", "request_time":
    #   "$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
    #   "$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
    #   "upstream_response_time": "$upstream_response_time",
    #   "upstream_response_length": "$upstream_response_length",
    #   "upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
    #   "$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
    #   "request_method": "$request_method", "server_protocol": "$server_protocol",
    #   "pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
    #   "geoip_country_code": "$geoip_country_code" }
    # proxy-body-size: 5120m
    # proxy-read-timeout: '6000'
    # proxy-send-timeout: '6000'
View File

@@ -0,0 +1,231 @@
nodes:
- address: 192.168.40.50
  user: root
  role:
  - controlplane
  - etcd
  - worker
  internal_address: 192.168.40.50
  hostname_override: "master-192.168.40.50"
  labels:
    ingress-deploy: true
    uavcloud.env: demo
- address: 192.168.119.105
  user: root
  role:
  - worker
  internal_address: 192.168.119.105
  labels:
    uavcloud.env: demo
- address: 192.168.119.106
  user: root
  role:
  - worker
  internal_address: 192.168.119.106
  labels:
    uavcloud.env: demo
authentication:
  strategy: x509
  sans:
  - "192.168.40.50"
#private_registries:
#- url: 192.168.0.8:8033 # private registry address
#  user: admin
#  password: "V2ryStr@ngPss"
#  is_default: true
##############################################################################
# Defaults to false; if set to true, RKE will not error out when it finds an unsupported Docker version
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.30.14-rancher1-1
ssh_key_path: /root/.ssh/id_ed25519
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
  etcd:
    backup_config:
      enabled: false
      interval_hours: 72
      retention: 3
      safe_timestamp: false
      timeout: 300
    creation: 12h
    extra_args:
      election-timeout: 5000
      heartbeat-interval: 500
      cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
    gid: 0
    retention: 72h
    snapshot: false
    uid: 0
  kube-api:
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-controller
    service_cluster_ip_range: 10.74.0.0/16
    # Expose a different port range for NodePort services
    service_node_port_range: 30000-40000
    always_pull_images: true
    pod_security_policy: false
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Enable audit log to stdout
      audit-log-path: "-"
      # Increase number of delete workers
      delete-collection-workers: 3
      # Set the level of log output to warning-level
      v: 1
  kube-controller:
    # CIDR pool used to assign IP addresses to pods in the cluster
    cluster_cidr: 10.100.0.0/16
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-api
    service_cluster_ip_range: 10.74.0.0/16
    # Add additional arguments to the kubernetes controller manager
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Set the level of log output to debug-level
      v: 1
      # Enable RotateKubeletServerCertificate feature gate
      feature-gates: RotateKubeletServerCertificate=true
      # Enable TLS Certificates management
      # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
      cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
      cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
  kubelet:
    # Base domain for the cluster
    cluster_domain: cluster.local
    # IP address for the DNS service endpoint
    cluster_dns_server: 10.74.0.10
    # Fail if swap is on
    fail_swap_on: false
    # Optionally define additional volume binds to a service
    extra_binds:
    - "/data/minio-pv:/hostStorage" # do not modify; added for the MinIO PV
    # Raise max pods from the default 110 to 122
    extra_args:
      max-pods: 122
  scheduler:
    extra_args:
      # Set the level of log output to warning-level
      v: 0
      tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
  kubeproxy:
    extra_args:
      # Set the level of log output to warning-level
      v: 1
authorization:
  mode: rbac
addon_job_timeout: 30
# Specify network plug-in (canal, calico, flannel, weave, or none)
network:
  mtu: 1440
  options:
    flannel_backend_type: vxlan
  plugin: calico
  tolerations:
  - key: "node.kubernetes.io/unreachable"
    operator: "Exists"
    effect: "NoExecute"
    tolerationseconds: 300
  - key: "node.kubernetes.io/not-ready"
    operator: "Exists"
    effect: "NoExecute"
    tolerationseconds: 300
# Specify DNS provider (coredns or kube-dns)
dns:
  provider: coredns
  nodelocal: {}
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 20%
      maxSurge: 15%
  linear_autoscaler_params:
    cores_per_replica: 0.34
    nodes_per_replica: 4
    prevent_single_point_failure: true
    min: 2
    max: 3
  tolerations:
  - key: "node.kubernetes.io/unreachable"
    operator: "Exists"
    effect: "NoExecute"
    tolerationseconds: 300
  - key: "node.kubernetes.io/not-ready"
    operator: "Exists"
    effect: "NoExecute"
    tolerationseconds: 300
# Specify monitoring provider (metrics-server)
monitoring:
  provider: metrics-server
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 8
ingress:
  provider: nginx
  default_backend: true
  http_port: 30500
  https_port: 31500
  extra_envs:
  - name: TZ
    value: Asia/Shanghai
  node_selector:
    ingress-deploy: true
  options:
    use-forwarded-headers: "true"
    access-log-path: /var/log/nginx/access.log
    # client-body-timeout: '6000'
    # compute-full-forwarded-for: 'true'
    # enable-underscores-in-headers: 'true'
    # log-format-escape-json: 'true'
    # log-format-upstream: >-
    #   { "msec": "$msec", "connection": "$connection", "connection_requests":
    #   "$connection_requests", "pid": "$pid", "request_id": "$request_id",
    #   "request_length": "$request_length", "remote_addr": "$remote_addr",
    #   "remote_user": "$remote_user", "remote_port": "$remote_port",
    #   "http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
    #   "$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
    #   "request_uri": "$request_uri", "args": "$args", "status": "$status",
    #   "body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
    #   "http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
    #   "http_host": "$http_host", "server_name": "$server_name", "request_time":
    #   "$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
    #   "$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
    #   "upstream_response_time": "$upstream_response_time",
    #   "upstream_response_length": "$upstream_response_length",
    #   "upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
    #   "$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
    #   "request_method": "$request_method", "server_protocol": "$server_protocol",
    #   "pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
    #   "geoip_country_code": "$geoip_country_code" }
    # proxy-body-size: 5120m
    # proxy-read-timeout: '6000'
    # proxy-send-timeout: '6000'
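Nothing in this cluster.yml is GPU-specific yet. For the AI-GPU tests, the GPU node would additionally need the NVIDIA driver and nvidia-container-toolkit installed, Docker's default-runtime set to "nvidia" in /etc/docker/daemon.json, and the NVIDIA device plugin deployed. A hedged sketch; the device-plugin version and manifest path are assumptions to verify against the NVIDIA/k8s-device-plugin releases:

kubectl apply -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v0.14.5/deployments/static/nvidia-device-plugin.yml
# Verify the GPU is advertised as an allocatable resource
kubectl describe nodes | grep -A2 'nvidia.com/gpu'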

View File

@@ -8,7 +8,10 @@ env:
value: "eth0"
# To be extra safe, also set it explicitly on the daemonset
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=eth0
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD="interface=eth0"
# 2025-11-10
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD="interface=eth0,enp125s0f0,p64p2"
# Delete all calico pods (see the sketch below)
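A minimal way to do that deletion, assuming the standard k8s-app=calico-node label from the calico manifests:

kubectl delete pod -n kube-system -l k8s-app=calico-node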

View File

@@ -1,6 +1,6 @@
#!/bin/bash
namespace=xmyd
namespace=xakny
# Gracefully scale down the Deployments
scale_deployments() {

View File

@@ -1,6 +1,6 @@
#!/bin/bash
namespace=eedsjc-uavms
namespace=xakny
install_yq() {
wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/yq_linux_amd64 -O /usr/local/bin/yq
@@ -44,7 +44,7 @@ backup_all_stateful_sets() {
echo ""
}
#install_yq
install_yq
backup_all_deployment
backup_all_service
backup_all_stateful_sets