diff --git a/.idea/workspace.xml b/.idea/workspace.xml
index 648e9df..849ed05 100644
--- a/.idea/workspace.xml
+++ b/.idea/workspace.xml
@@ -4,31 +4,100 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
+
+
+
+
+
+
+
-
-
-
@@ -60,39 +129,40 @@
- {
- "keyToString": {
- "KUBERNETES_SUPPRESS_CONFIG_CLUSTER_SUGGESTION": "true",
- "RunOnceActivity.ShowReadmeOnStart": "true",
- "RunOnceActivity.git.unshallow": "true",
- "RunOnceActivity.go.formatter.settings.were.checked": "true",
- "RunOnceActivity.go.migrated.go.modules.settings": "true",
- "RunOnceActivity.typescript.service.memoryLimit.init": "true",
- "SHARE_PROJECT_CONFIGURATION_FILES": "true",
- "git-widget-placeholder": "main",
- "go.import.settings.migrated": "true",
- "last_opened_file_path": "C:/Users/wddsh/Documents/IdeaProjects/CmiiDeploy/69-202511-AI-GPU测试",
- "node.js.detected.package.eslint": "true",
- "node.js.detected.package.tslint": "true",
- "node.js.selected.package.eslint": "(autodetect)",
- "node.js.selected.package.tslint": "(autodetect)",
- "nodejs_package_manager_path": "npm",
- "settings.editor.selected.configurable": "editor.preferences.tabs",
- "vue.rearranger.settings.migration": "true"
+
+}]]>
-
-
-
-
-
+
+
+
+
+
@@ -104,8 +174,8 @@
-
-
+
+
@@ -201,7 +271,26 @@
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -235,13 +324,22 @@
1762942452911
-
+
+
+ 1765534772530
+
+
+
+ 1765534772530
+
+
+
@@ -260,6 +358,7 @@
-
+
+
\ No newline at end of file
diff --git a/67-202508-雄安空能院/deploy/k8s-pyfusion-configmap.yaml b/67-202508-雄安空能院/deploy/k8s-pyfusion-configmap.yaml
index 0326276..2f3173b 100644
--- a/67-202508-雄安空能院/deploy/k8s-pyfusion-configmap.yaml
+++ b/67-202508-雄安空能院/deploy/k8s-pyfusion-configmap.yaml
@@ -8,7 +8,7 @@ data:
mqtt:
broker: "helm-emqxs"
port: 1883
- username: "cmii"
+ username: "cmlc"
password: "odD8#Ve7.B"
topics:
diff --git a/67-202508-雄安空能院/minio整体复制.sh b/67-202508-雄安空能院/minio整体复制.sh
new file mode 100644
index 0000000..c7f82f1
--- /dev/null
+++ b/67-202508-雄安空能院/minio整体复制.sh
@@ -0,0 +1,11 @@
+
+
+
+mc alias set minio-old http://10.22.48.5:39110 cmii B#923fC7mk
+mc alias set minio-new http://10.22.48.7:39010 cmii B#923fC7mk
+
+mc ls minio-old
+mc ls minio-new
+
+
+mc mirror --overwrite --watch minio-old minio-new
\ No newline at end of file
diff --git a/71-202601-XA监管平台/1-批量脚本.sh b/71-202601-XA监管平台/1-批量脚本.sh
new file mode 100644
index 0000000..534705a
--- /dev/null
+++ b/71-202601-XA监管平台/1-批量脚本.sh
@@ -0,0 +1,40 @@
+
+
+mv agent-wdd_linux_amd64 /usr/local/bin/agent-wdd
+chmod +x /usr/local/bin/agent-wdd
+
+# 主节点安装ssh-key
+/usr/local/bin/agent-wdd base ssh config
+/usr/local/bin/agent-wdd base ssh key
+
+# 批量执行命令
+host_list=(
+ 10.22.57.5
+ 10.22.57.6
+ 10.22.57.7
+ 10.22.57.3
+ 10.22.57.4
+)
+
+for server in "${host_list[@]}";do
+ echo "current ip is $server"
+
+ ssh root@${server} "systemctl restart docker"
+ ssh root@${server} "docker info"
+
+ echo ""
+done
+
+
+scp /usr/local/bin/agent-wdd root@${server}:/usr/local/bin/agent-wdd
+ssh root@${server} "/usr/local/bin/agent-wdd base ssh config && /usr/local/bin/agent-wdd base ssh key"
+ssh root@${server} "echo yes"
+
+
+ssh root@${server} "echo \"\"> /etc/apt/apt.conf.d/01proxy"
+ssh root@${server} "printf '%s\n' \
+'Acquire::http::Proxy \"http://10.22.57.8:3142\";' \
+'Acquire::https::Proxy \"http://10.22.57.8:3142\";' \
+| tee /etc/apt/apt.conf.d/01proxy >/dev/null"
+ssh root@${server} "apt-get update"
+ssh root@${server} "apt-get install -y gparted"
\ No newline at end of file
diff --git a/71-202601-XA监管平台/2-disk.sh b/71-202601-XA监管平台/2-disk.sh
new file mode 100644
index 0000000..9363b7c
--- /dev/null
+++ b/71-202601-XA监管平台/2-disk.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+set -e
+
+# 用户配置部分
+DISK="/dev/vdb" # 要操作的物理磁盘(请根据实际情况修改)
+MOUNT_PATH="/var/lib/docker" # 挂载点路径(目录会自动创建)
+FS_TYPE="ext4" # 文件系统类型(支持ext4/xfs,默认ext4)
+
+#----------------------------------------------------------
+# 核心逻辑(建议非必要不修改)
+#----------------------------------------------------------
+
+function check_prerequisites() {
+ # 必须root权限运行检查
+ [[ $EUID -ne 0 ]] && echo -e "\033[31m错误:必须使用root权限运行此脚本\033[0m" && exit 1
+
+ # 磁盘存在性检查
+ [[ ! -b "$DISK" ]] && echo -e "\033[31m错误:磁盘 $DISK 不存在\033[0m" && exit 1
+
+ # 文件系统类型校验
+ if [[ "$FS_TYPE" != "ext4" && "$FS_TYPE" != "xfs" ]]; then
+ echo -e "\033[31m错误:不支持的磁盘格式 $FS_TYPE,仅支持 ext4/xfs\033[0m"
+ exit 1
+ fi
+}
+
+function prepare_disk() {
+ local partition="${DISK}1"
+
+ echo -e "\033[34m正在初始化磁盘分区...\033[0m"
+ parted "$DISK" --script mklabel gpt
+ parted "$DISK" --script mkpart primary 0% 100%
+ parted "$DISK" --script set 1 lvm on
+ partprobe "$DISK" # 确保系统识别新分区表
+
+ echo -e "\033[34m正在创建LVM结构...\033[0m"
+ pvcreate "$partition"
+ vgcreate datavg "$partition"
+ lvcreate -y -l 100%FREE -n lvdata datavg
+}
+
+function format_and_mount() {
+ echo -e "\033[34m格式化逻辑卷...\033[0m"
+ if [[ "$FS_TYPE" == "ext4" ]]; then
+ mkfs.ext4 -F "/dev/datavg/lvdata"
+ else
+ mkfs.xfs -f "/dev/datavg/lvdata"
+ fi
+
+ echo -e "\033[34m设置挂载配置...\033[0m"
+ mkdir -p "$MOUNT_PATH"
+ UUID=$(blkid -s UUID -o value "/dev/datavg/lvdata")
+ echo "UUID=$UUID $MOUNT_PATH $FS_TYPE defaults 0 0" | tee -a /etc/fstab >/dev/null
+ mount -a
+}
+
+function verify_result() {
+ echo -e "\n\033[1;36m最终验证结果:\033[0m"
+ lsblk -f "$DISK"
+ echo -e "\n磁盘空间使用情况:"
+ df -hT "$MOUNT_PATH"
+}
+
+# 主执行流程
+check_prerequisites
+prepare_disk
+format_and_mount
+verify_result
+
+echo -e "\n\033[32m操作执行完毕,请仔细核查上述输出信息\033[0m"
+
+
+
+#请写一个shell脚本,脚本前面有变量可以设置 物理磁盘名称 挂载点路径 磁盘格式化的形式,脚本实现如下的功能
+#1.将物理磁盘的盘符修改为gpt格式
+#2.将物理磁盘全部空间创建一个分区,分区格式为lvm
+#3.将分区分配给逻辑卷datavg
+#4.将datavg所有可用的空间分配给逻辑卷lvdata
+#5.将逻辑卷格式化为变量磁盘格式化的形式(支持xfs和ext4的格式,默认为ext4)
+#6.创建变量挂载点路径
+#7.写入/etc/fstab,将逻辑卷挂载到变量挂载点,执行全部挂载操作
+#8.执行lsblk和df -hT查看分区是否正确挂载
+
+
diff --git a/71-202601-XA监管平台/ImageSyncDLTU.sh b/71-202601-XA监管平台/ImageSyncDLTU.sh
new file mode 100644
index 0000000..4793ac0
--- /dev/null
+++ b/71-202601-XA监管平台/ImageSyncDLTU.sh
@@ -0,0 +1,168 @@
+#!/bin/bash
+
+all_image_list_txt="all-cmii-image-list.txt" # 需要修改版本
+gzip_image_list_txt="all-gzip-image-list.txt" # 一般不需要修改
+oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
+local_gzip_path="/root/octopus-image"
+
+DockerRegisterDomain="10.22.57.8:8033" # 需要根据实际修改
+HarborAdminPass=V2ryStr@ngPss # 需要跟第一脚本中的密码保持一致
+
+print_green() {
+ echo -e "\033[32m${1}\033[0m"
+ echo ""
+}
+
+print_red() {
+ echo -e "\033[31m${1}\033[0m"
+ echo ""
+}
+
+Download_Load_Tag_Upload() {
+ print_green "[DLTU] - start !"
+ while [[ $# -gt 0 ]]; do
+ case "$1" in
+ rke)
+ # print_green "download rke "
+ local_gzip_path="$local_gzip_path/rke13014"
+ mkdir -p ${local_gzip_path}
+ oss_prefix_url="$oss_prefix_url/rke13014/"
+ dltu
+ shift # past argument
+ ;;
+ middle)
+ local_gzip_path="$local_gzip_path/middle"
+ mkdir -p $local_gzip_path
+ oss_prefix_url="$oss_prefix_url/middle/"
+ dltu
+ shift # past argument
+ ;;
+ cmii)
+ local_gzip_path="$local_gzip_path/xauas22"
+ mkdir -p $local_gzip_path
+ oss_prefix_url="$oss_prefix_url/xauas22/"
+ dltu
+ shift # past argument
+ ;;
+ *)
+ # unknown option
+ print_red "bad arguments"
+ ;;
+ esac
+ done
+
+}
+
+dltu() {
+ print_green "download all image name list and gzip file list!"
+ cd $local_gzip_path || exit
+
+ rm $all_image_list_txt
+ rm $gzip_image_list_txt
+
+ wget "$oss_prefix_url$all_image_list_txt"
+ wget "$oss_prefix_url$gzip_image_list_txt"
+
+ docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
+ echo ""
+ while IFS= read -r i; do
+ [ -z "${i}" ] && continue
+ echo "download gzip file =>: $oss_prefix_url${i}"
+ if wget "$oss_prefix_url${i}" >/dev/null 2>&1; then
+ echo "Gzip file download success : ${i}"
+ image_full_name=$(docker load -i ${i} | head -n1 |awk -F': ' '{print $2}')
+
+ app_name=$(echo "$image_full_name" | sed 's|.*/||g')
+ echo "extract short name is $app_name"
+
+ if echo $image_full_name | grep -q "rancher"
+ then
+ print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
+ docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
+ docker push $DockerRegisterDomain/rancher/$app_name
+ else
+ print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
+ docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
+ docker push $DockerRegisterDomain/cmii/$app_name
+ fi
+
+ else
+ print_red "Gzip file download FAILED : ${i}"
+ fi
+ echo "-------------------------------------------------"
+ done <"${gzip_image_list_txt}"
+ shift
+
+}
+
+Load_Tag_Upload(){
+ print_green "[LTU] - start to load image from offline !"
+
+ while [[ $# -gt 0 ]]; do
+ case "$1" in
+ rke)
+ # print_green "download rke "
+ local_gzip_path="$local_gzip_path/rke13014"
+ mkdir -p ${local_gzip_path}
+ oss_prefix_url="$oss_prefix_url/rke13014/"
+ ltu
+ shift # past argument
+ ;;
+ middle)
+ local_gzip_path="$local_gzip_path/middle"
+ mkdir -p $local_gzip_path
+ oss_prefix_url="$oss_prefix_url/middle/"
+ ltu
+ shift # past argument
+ ;;
+ cmii)
+ local_gzip_path="$local_gzip_path/cmii"
+ mkdir -p $local_gzip_path
+ oss_prefix_url="$oss_prefix_url/cmii/"
+ ltu
+ shift # past argument
+ ;;
+ *)
+ # unknown option
+ print_red "bad arguments"
+ ;;
+ esac
+ done
+
+}
+
+ltu(){
+ all_file_list=$(find $local_gzip_path -type f -name "*.tar.gz")
+
+ for file in $all_file_list; do
+ echo "offline gzip file is => : $file"
+ image_full_name=$(docker load -i ${file} | head -n1 |awk -F': ' '{print $2}')
+
+ docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
+
+ app_name=$(echo "$image_full_name" | sed 's|.*/||g')
+ echo "extract short name is $app_name"
+
+ if echo $image_full_name | grep -q "rancher"
+ then
+ print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
+ docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
+ docker push $DockerRegisterDomain/rancher/$app_name
+ else
+ print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
+ docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
+ docker push $DockerRegisterDomain/cmii/$app_name
+ fi
+ done
+}
+
+
+test(){
+ app_name=$(echo "nginx:latest" | sed 's|.*/||g')
+ echo "extract short name is $app_name"
+}
+
+# test
+Download_Load_Tag_Upload "rke"
+
+# Load_Tag_Upload "cmii"
\ No newline at end of file
diff --git a/71-202601-XA监管平台/cmii-update.sh b/71-202601-XA监管平台/cmii-update.sh
new file mode 100644
index 0000000..3065045
--- /dev/null
+++ b/71-202601-XA监管平台/cmii-update.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+harbor_host=10.22.57.8:8033
+namespace=xa-dcity-uas-260116
+app_name=""
+new_tag=""
+
+download_from_oss() {
+ if [ "$1" == "" ]; then
+ echo "no zip file in error!"
+ exit 233
+ fi
+
+ echo "start to download => $1"
+ wget "https://oss.demo.uavcmlc.com/cmlc-installation/tmp/$1"
+
+ echo ""
+ echo ""
+}
+
+upload_image_to_harbor(){
+ if [ "$app_name" == "" ]; then
+ echo "app name null exit!"
+ exit 233
+ fi
+
+ if ! docker load < "$1"; then
+ echo "docker load error !"
+ fi
+ docker tag "harbor.cdcyy.com.cn/cmii/$app_name:$new_tag" "$harbor_host/cmii/$app_name:$new_tag"
+ echo ""
+ echo ""
+ echo "upload_image_to_harbor - start to push to => $harbor_host/cmii/$app_name:$new_tag"
+ docker login -u admin -p V2ryStr@ngPss $harbor_host
+ docker push "$harbor_host/cmii/$app_name:$new_tag"
+ echo ""
+ echo ""
+
+}
+
+parse_args(){
+ if [ "$1" == "" ]; then
+ echo "no zip file in error!"
+ exit 233
+ fi
+ local image_name="$1"
+
+ # cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
+ app_name=$(echo $image_name | cut -d "=" -f1)
+ new_tag=$(echo $image_name | cut -d "=" -f2)
+}
+
+update_image_tag(){
+ if [ "$new_tag" == "" ]; then
+ echo "new tag error!"
+ exit 233
+ fi
+
+ local image_prefix=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)
+
+ echo "image grep is => ${image_prefix}"
+
+ echo "start to update ${namespace} ${app_name} to ${new_tag} !"
+ echo ""
+ kubectl -n ${namespace} patch deployment "${app_name}" -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${harbor_host}/cmii/$app_name:${new_tag}\"}]}}}}"
+ echo ""
+ echo "start to wait for 3 seconds!"
+ sleep 3
+ local image_new=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}')
+ echo ""
+ echo "new image are => $image_new"
+ echo ""
+}
+
+main(){
+ parse_args "$1"
+ download_from_oss "$1"
+ upload_image_to_harbor "$1"
+ update_image_tag
+}
+
+main "$@"
\ No newline at end of file
diff --git a/71-202601-XA监管平台/doris-deploy/doris-be-configmap.yaml b/71-202601-XA监管平台/doris-deploy/doris-be-configmap.yaml
new file mode 100644
index 0000000..61754c2
--- /dev/null
+++ b/71-202601-XA监管平台/doris-deploy/doris-be-configmap.yaml
@@ -0,0 +1,82 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: doris-cluster-be-conf
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/component: be
+data:
+ be.conf: >
+ CUR_DATE=`date +%Y%m%d-%H%M%S`
+
+ # Log dir
+ LOG_DIR="${DORIS_HOME}/log/"
+
+ # For jdk 8
+ JAVA_OPTS="-Dfile.encoding=UTF-8 -Xmx2048m -DlogPath=$LOG_DIR/jni.log -Xloggc:$LOG_DIR/be.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.security.krb5.debug=true -Dsun.java.command=DorisBE -XX:-CriticalJNINatives"
+
+ # Set your own JAVA_HOME
+ # JAVA_HOME=/path/to/jdk/
+
+ # https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
+ # https://jemalloc.net/jemalloc.3.html jemalloc 内存分配器设置参数
+ JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:15000,dirty_decay_ms:15000,oversize_threshold:0,prof:false,lg_prof_interval:32,lg_prof_sample:19,prof_gdump:false,prof_accum:false,prof_leak:false,prof_final:false"
+ JEMALLOC_PROF_PRFIX=""
+
+ # ports for admin, web, heartbeat service
+ be_port = 9060
+ webserver_port = 8040
+ heartbeat_service_port = 9050
+ brpc_port = 8060
+ arrow_flight_sql_port = -1
+
+ # HTTPS configures
+ enable_https = false
+ # path of certificate in PEM format.
+ #ssl_certificate_path = "$DORIS_HOME/conf/cert.pem"
+ # path of private key in PEM format.
+ #ssl_private_key_path = "$DORIS_HOME/conf/key.pem"
+
+ # Choose one if there are more than one ip except loopback address.
+ # Note that there should at most one ip match this list.
+ # If no ip match this rule, will choose one randomly.
+ # use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
+ # Default value is empty.
+ # priority_networks = 10.10.10.0/24;192.168.0.0/16
+
+ # data root path, separate by ';'
+ # You can specify the storage type for each root path, HDD (cold data) or SSD (hot data)
+ # eg:
+ # storage_root_path = /home/disk1/doris;/home/disk2/doris;/home/disk2/doris
+ # storage_root_path = /home/disk1/doris,medium:SSD;/home/disk2/doris,medium:SSD;/home/disk2/doris,medium:HDD
+ # /home/disk2/doris,medium:HDD(default)
+ #
+ # you also can specify the properties by setting ':', separate by ','
+ # property 'medium' has a higher priority than the extension of path
+ #
+ # Default value is ${DORIS_HOME}/storage, you should create it by hand.
+ # storage_root_path = ${DORIS_HOME}/storage
+
+ # Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
+ # jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
+
+ # Advanced configurations
+ # INFO, WARNING, ERROR, FATAL
+ sys_log_level = INFO
+ # sys_log_roll_mode = SIZE-MB-1024
+ # sys_log_roll_num = 10
+ # sys_log_verbose_modules = *
+ # log_buffer_level = -1
+
+ # aws sdk log level
+ # Off = 0,
+ # Fatal = 1,
+ # Error = 2,
+ # Warn = 3,
+ # Info = 4,
+ # Debug = 5,
+ # Trace = 6
+ # Default to turn off aws sdk log, because aws sdk errors that need to be cared will be output through Doris logs
+ #aws_log_level=0
+ ## If you are not running in aws cloud, you can disable EC2 metadata
+ #AWS_EC2_METADATA_DISABLED=false
\ No newline at end of file
diff --git a/71-202601-XA监管平台/doris-deploy/doris-be-internal-service.yaml b/71-202601-XA监管平台/doris-deploy/doris-be-internal-service.yaml
new file mode 100644
index 0000000..10009e1
--- /dev/null
+++ b/71-202601-XA监管平台/doris-deploy/doris-be-internal-service.yaml
@@ -0,0 +1,17 @@
+kind: Service
+apiVersion: v1
+metadata:
+ namespace: xa-dcity-uas-260116
+ name: doris-cluster-be-internal
+ labels:
+ app.kubernetes.io/component: doris-cluster-be-internal
+spec:
+ ports:
+ - name: heartbeat-port
+ protocol: TCP
+ port: 9050
+ targetPort: 9050
+ selector:
+ app.kubernetes.io/component: doris-cluster-be
+ clusterIP: None
+ type: ClusterIP
\ No newline at end of file
diff --git a/71-202601-XA监管平台/doris-deploy/doris-be-service.yaml b/71-202601-XA监管平台/doris-deploy/doris-be-service.yaml
new file mode 100644
index 0000000..da3db7e
--- /dev/null
+++ b/71-202601-XA监管平台/doris-deploy/doris-be-service.yaml
@@ -0,0 +1,32 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: doris-cluster-be-service
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/component: doris-cluster-be
+spec:
+ ports:
+ - name: be-port
+ protocol: TCP
+ port: 9060
+ targetPort: 9060
+ nodePort: 32189
+ - name: webserver-port
+ protocol: TCP
+ port: 8040
+ targetPort: 8040
+ nodePort: 31624
+ - name: heartbeat-port
+ protocol: TCP
+ port: 9050
+ targetPort: 9050
+ nodePort: 31625
+ - name: brpc-port
+ protocol: TCP
+ port: 8060
+ targetPort: 8060
+ nodePort: 31627
+ selector:
+ app.kubernetes.io/component: doris-cluster-be
+ type: NodePort
\ No newline at end of file
diff --git a/71-202601-XA监管平台/doris-deploy/doris-be-statusfulset.yaml b/71-202601-XA监管平台/doris-deploy/doris-be-statusfulset.yaml
new file mode 100644
index 0000000..65113e0
--- /dev/null
+++ b/71-202601-XA监管平台/doris-deploy/doris-be-statusfulset.yaml
@@ -0,0 +1,223 @@
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: doris-cluster-be
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/component: doris-cluster-be
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: doris-cluster-be
+ template:
+ metadata:
+ name: doris-cluster-be
+ labels:
+ app.kubernetes.io/component: doris-cluster-be
+ spec:
+ # 确保 hostname 和 subdomain 正确设置
+ hostname: $(POD_NAME)
+ subdomain: doris-cluster-be-internal
+ imagePullSecrets:
+ - name: harborsecret
+ volumes:
+ - name: podinfo
+ downwardAPI:
+ items:
+ - path: labels
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.labels
+ - path: annotations
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.annotations
+ defaultMode: 420
+ - name: doris-cluster-be-conf
+ configMap:
+ name: doris-cluster-be-conf
+ defaultMode: 420
+ - name: be-storage
+ persistentVolumeClaim:
+ claimName: doris-be-storage-pvc
+ - name: be-log
+ persistentVolumeClaim:
+ claimName: doris-fe-log-pvc
+ initContainers:
+ - name: default-init
+ image: '10.22.57.8:8033/cmii/alpine:3.23.0'
+ command:
+ - /bin/sh
+ args:
+ - '-c'
+ - sysctl -w vm.max_map_count=2000000 ; swapoff -a ; ulimit -n 655350
+ resources:
+ limits:
+ cpu: '1'
+ memory: 1Gi
+ requests:
+ cpu: '0.5'
+ memory: 500Mi
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ containers:
+ - name: be
+ # 添加 securityContext 提升文件描述符限制
+ securityContext:
+ capabilities:
+ add:
+ - SYS_RESOURCE
+ - IPC_LOCK
+ runAsUser: 0
+ image: '10.22.57.8:8033/cmii/doris.be-ubuntu:2.1.6'
+ command:
+ - /opt/apache-doris/be_entrypoint.sh
+ args:
+ - $(ENV_FE_ADDR)
+ ports:
+ - name: be-port
+ containerPort: 9060
+ protocol: TCP
+ - name: webserver-port
+ containerPort: 8040
+ protocol: TCP
+ - name: heartbeat-port
+ containerPort: 9050
+ protocol: TCP
+ - name: brpc-port
+ containerPort: 8060
+ protocol: TCP
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: CONFIGMAP_MOUNT_PATH
+ value: /etc/doris
+ - name: USER
+ value: root
+ - name: DORIS_ROOT
+ value: /opt/apache-doris
+ - name: ENV_FE_ADDR
+ value: doris-cluster-fe-service
+ - name: FE_QUERY_PORT
+ value: '9030'
+ resources:
+ limits:
+ cpu: '4'
+ memory: 4Gi
+ requests:
+ cpu: '1'
+ memory: 2Gi
+ volumeMounts:
+ - name: podinfo
+ mountPath: /etc/podinfo
+ - name: be-storage
+ mountPath: /opt/apache-doris/be/storage
+ - name: be-log
+ mountPath: /opt/apache-doris/be/log
+ - name: doris-cluster-be-conf
+ mountPath: /etc/doris
+ livenessProbe:
+ tcpSocket:
+ port: 9050
+ initialDelaySeconds: 80
+ timeoutSeconds: 180
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /api/health
+ port: 8040
+ scheme: HTTP
+ timeoutSeconds: 1
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ tcpSocket:
+ port: 9050
+ timeoutSeconds: 1
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 60
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /opt/apache-doris/be_prestop.sh
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: IfNotPresent
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: doris-deploy
+ operator: In
+ values:
+ - "true"
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/component
+ operator: In
+ values:
+ - doris-cluster-be
+ topologyKey: kubernetes.io/hostname
+ schedulerName: default-scheduler
+# volumeClaimTemplates:
+# - kind: PersistentVolumeClaim
+# apiVersion: v1
+# metadata:
+# name: be-storage
+# spec:
+# accessModes:
+# - ReadWriteOnce
+# resources:
+# requests:
+# storage: '10'
+# storageClassName: nfs-prod-distribute
+# volumeMode: Filesystem
+# - kind: PersistentVolumeClaim
+# apiVersion: v1
+# metadata:
+# name: be-log
+# spec:
+# accessModes:
+# - ReadWriteOnce
+# resources:
+# requests:
+# storage: '10'
+# storageClassName: nfs-prod-distribute
+# volumeMode: Filesystem
+ serviceName: doris-cluster-be-internal
+ podManagementPolicy: Parallel
\ No newline at end of file
diff --git a/71-202601-XA监管平台/doris-deploy/doris-fe-configmap.yaml b/71-202601-XA监管平台/doris-deploy/doris-fe-configmap.yaml
new file mode 100644
index 0000000..33803b1
--- /dev/null
+++ b/71-202601-XA监管平台/doris-deploy/doris-fe-configmap.yaml
@@ -0,0 +1,67 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: doris-cluster-fe-conf
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/component: fe
+data:
+ fe.conf: |
+ #####################################################################
+ ## The uppercase properties are read and exported by bin/start_fe.sh.
+ ## To see all Frontend configurations,
+ ## see fe/src/org/apache/doris/common/Config.java
+ #####################################################################
+
+ CUR_DATE=`date +%Y%m%d-%H%M%S`
+
+ # Log dir
+ LOG_DIR = ${DORIS_HOME}/log
+
+ # For jdk 8
+ JAVA_OPTS="-Dfile.encoding=UTF-8 -Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:$LOG_DIR/log/fe.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Dlog4j2.formatMsgNoLookups=true"
+
+ # Set your own JAVA_HOME
+ # JAVA_HOME=/path/to/jdk/
+
+ ##
+ ## the lowercase properties are read by main program.
+ ##
+
+ # store metadata, must be created before start FE.
+ # Default value is ${DORIS_HOME}/doris-meta
+ # meta_dir = ${DORIS_HOME}/doris-meta
+
+ # Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
+ # jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
+
+ http_port = 8030
+ rpc_port = 9020
+ query_port = 9030
+ edit_log_port = 9010
+ arrow_flight_sql_port = -1
+
+ # Choose one if there are more than one ip except loopback address.
+ # Note that there should at most one ip match this list.
+ # If no ip match this rule, will choose one randomly.
+ # use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
+ # Default value is empty.
+ # priority_networks = 10.10.10.0/24;192.168.0.0/16
+
+ # Advanced configurations
+ # log_roll_size_mb = 1024
+ # INFO, WARN, ERROR, FATAL
+ sys_log_level = INFO
+ # NORMAL, BRIEF, ASYNC,FE 日志的输出模式,其中 NORMAL 为默认的输出模式,日志同步输出且包含位置信息。ASYNC 默认是日志异步输出且包含位置信息。 BRIEF 模式是日志异步输出但不包含位置信息。三种日志输出模式的性能依次递增
+ sys_log_mode = ASYNC
+ # sys_log_roll_num = 10
+ # sys_log_verbose_modules = org.apache.doris
+ # audit_log_dir = $LOG_DIR
+ # audit_log_modules = slow_query, query
+ # audit_log_roll_num = 10
+ # meta_delay_toleration_second = 10
+ # qe_max_connection = 1024
+ # qe_query_timeout_second = 300
+ # qe_slow_log_ms = 5000
+ #Fully Qualified Domain Name,完全限定域名,开启后各节点之间通信基于FQDN
+ enable_fqdn_mode = true
\ No newline at end of file
diff --git a/71-202601-XA监管平台/doris-deploy/doris-fe-internal-service.yaml b/71-202601-XA监管平台/doris-deploy/doris-fe-internal-service.yaml
new file mode 100644
index 0000000..5d9afc3
--- /dev/null
+++ b/71-202601-XA监管平台/doris-deploy/doris-fe-internal-service.yaml
@@ -0,0 +1,17 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: doris-cluster-fe-internal
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/component: doris-cluster-fe
+spec:
+ ports:
+ - name: query-port
+ protocol: TCP
+ port: 9030
+ targetPort: 9030
+ selector:
+ app.kubernetes.io/component: doris-cluster-fe
+ clusterIP: None
+ type: ClusterIP
\ No newline at end of file
diff --git a/71-202601-XA监管平台/doris-deploy/doris-fe-service.yaml b/71-202601-XA监管平台/doris-deploy/doris-fe-service.yaml
new file mode 100644
index 0000000..a598587
--- /dev/null
+++ b/71-202601-XA监管平台/doris-deploy/doris-fe-service.yaml
@@ -0,0 +1,32 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: doris-cluster-fe-service
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/component: doris-cluster-fe
+spec:
+ ports:
+ - name: http-port
+ protocol: TCP
+ port: 8030
+ targetPort: 8030
+ nodePort: 31620
+ - name: rpc-port
+ protocol: TCP
+ port: 9020
+ targetPort: 9020
+ nodePort: 31621
+ - name: query-port
+ protocol: TCP
+ port: 9030
+ targetPort: 9030
+ nodePort: 31622
+ - name: edit-log-port
+ protocol: TCP
+ port: 9010
+ targetPort: 9010
+ nodePort: 31623
+ selector:
+ app.kubernetes.io/component: doris-cluster-fe
+ type: NodePort
\ No newline at end of file
diff --git a/71-202601-XA监管平台/doris-deploy/doris-fe-statusfulset.yaml b/71-202601-XA监管平台/doris-deploy/doris-fe-statusfulset.yaml
new file mode 100644
index 0000000..661cffe
--- /dev/null
+++ b/71-202601-XA监管平台/doris-deploy/doris-fe-statusfulset.yaml
@@ -0,0 +1,198 @@
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: doris-cluster-fe
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/component: doris-cluster-fe
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: doris-cluster-fe
+ template:
+ metadata:
+ name: doris-cluster-fe
+ labels:
+ app.kubernetes.io/component: doris-cluster-fe
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ volumes:
+ - name: meta
+ persistentVolumeClaim:
+# claimName: meta
+ claimName: doris-fe-meta-pvc
+ - name: log
+ persistentVolumeClaim:
+ # claimName: meta
+ claimName: doris-fe-log-pvc
+ - name: podinfo
+ downwardAPI:
+ items:
+ - path: labels
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.labels
+ - path: annotations
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.annotations
+ defaultMode: 420
+ - name: doris-cluster-fe-conf
+ configMap:
+ name: doris-cluster-fe-conf
+ defaultMode: 420
+ containers:
+ - name: doris-cluster-fe
+ image: '10.22.57.8:8033/cmii/doris.fe-ubuntu:2.1.6'
+ command:
+ - /opt/apache-doris/fe_entrypoint.sh
+ args:
+ - $(ENV_FE_ADDR)
+ ports:
+ - name: http-port
+ containerPort: 8030
+ protocol: TCP
+ - name: rpc-port
+ containerPort: 9020
+ protocol: TCP
+ - name: query-port
+ containerPort: 9030
+ protocol: TCP
+ - name: edit-log-port
+ containerPort: 9010
+ protocol: TCP
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: CONFIGMAP_MOUNT_PATH
+ value: /etc/doris
+ - name: USER
+ value: root
+ - name: DORIS_ROOT
+ value: /opt/apache-doris
+ - name: ENV_FE_ADDR
+ value: doris-cluster-fe-service
+ - name: FE_QUERY_PORT
+ value: '9030'
+ - name: ELECT_NUMBER
+ value: '3'
+ resources:
+ limits:
+ cpu: '4'
+ memory: 4Gi
+ requests:
+ cpu: '1'
+ memory: 2Gi
+ volumeMounts:
+ - name: podinfo
+ mountPath: /etc/podinfo
+ - name: log
+ mountPath: /opt/apache-doris/fe/log
+ - name: meta
+ mountPath: /opt/apache-doris/fe/doris-meta
+ - name: doris-cluster-fe-conf
+ mountPath: /etc/doris
+ livenessProbe:
+ tcpSocket:
+ port: 9030
+ initialDelaySeconds: 80
+ timeoutSeconds: 180
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /api/health
+ port: 8030
+ scheme: HTTP
+ timeoutSeconds: 1
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ tcpSocket:
+ port: 9030
+ timeoutSeconds: 1
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 60
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /opt/apache-doris/fe_prestop.sh
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: IfNotPresent
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: doris-deploy
+ operator: In
+ values:
+ - "true"
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/component
+ operator: In
+ values:
+ - doris-cluster-fe
+ topologyKey: kubernetes.io/hostname
+ schedulerName: default-scheduler
+# volumeClaimTemplates:
+# - kind: PersistentVolumeClaim
+# apiVersion: v1
+# metadata:
+# name: meta
+# spec:
+# accessModes:
+# - ReadWriteOnce
+# resources:
+# requests:
+# storage: 10G
+# storageClassName: hcms-efs-class
+# volumeMode: Filesystem
+# - kind: PersistentVolumeClaim
+# apiVersion: v1
+# metadata:
+# name: log
+# spec:
+# accessModes:
+# - ReadWriteOnce
+# resources:
+# requests:
+# storage: '10'
+# storageClassName: hcms-efs-class
+# volumeMode: Filesystem
+ serviceName: doris-cluster-fe-internal
+ podManagementPolicy: Parallel
\ No newline at end of file
diff --git a/71-202601-XA监管平台/doris-deploy/doris-pvc.yaml b/71-202601-XA监管平台/doris-deploy/doris-pvc.yaml
new file mode 100644
index 0000000..cca7526
--- /dev/null
+++ b/71-202601-XA监管平台/doris-deploy/doris-pvc.yaml
@@ -0,0 +1,60 @@
+---
+# pvc.yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: doris-fe-meta-pvc
+ namespace: xa-dcity-uas-260116
+spec:
+ storageClassName: nfs-prod-distribute
+ volumeMode: Filesystem
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 100Gi
+
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: doris-fe-log-pvc
+ namespace: xa-dcity-uas-260116
+spec:
+ storageClassName: nfs-prod-distribute
+ volumeMode: Filesystem
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 100Gi
+
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: doris-be-storage-pvc
+ namespace: xa-dcity-uas-260116
+spec:
+ storageClassName: nfs-prod-distribute
+ volumeMode: Filesystem
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 500Gi # 根据实际存储需求调整
+
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: doris-be-log-pvc
+ namespace: xa-dcity-uas-260116
+spec:
+ storageClassName: nfs-prod-distribute
+ volumeMode: Filesystem
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 100Gi
\ No newline at end of file
diff --git a/71-202601-XA监管平台/doris-deploy/修改pvc-然后statefulset中的image.txt b/71-202601-XA监管平台/doris-deploy/修改pvc-然后statefulset中的image.txt
new file mode 100644
index 0000000..ca73f54
--- /dev/null
+++ b/71-202601-XA监管平台/doris-deploy/修改pvc-然后statefulset中的image.txt
@@ -0,0 +1,31 @@
+
+
+修改PVC文件
+修改全部的NAMESPACE
+修改statefulset里面的IMAGE
+
+
+# BE出现下面的错误
+like
+ * soft nofile 655350
+ * hard nofile 655350
+and then run 'ulimit -n 655350' to take effect on current session.
+
+需要修改 ContainerD 的 systemd Service 配置文件:
+
+/lib/systemd/system/containerd.service
+
+# 1. 在 [Service] 段中设置以下资源限制(<<< 为关键行)
+[Service]
+LimitNOFILE=655350 <<<
+LimitNPROC=infinity
+LimitMEMLOCK=infinity
+
+# 2. 重新加载并重启 containerd
+sudo systemctl daemon-reload
+sudo systemctl restart containerd
+
+# 3. 验证 containerd 的限制
+ps aux | grep containerd | grep -v grep
+cat /proc/$(pgrep containerd | head -1)/limits | grep "open files"
+# 应该显示: Max open files 655350 655350 files
\ No newline at end of file
diff --git a/71-202601-XA监管平台/doris数据同步/doris-data-import.sh b/71-202601-XA监管平台/doris数据同步/doris-data-import.sh
new file mode 100644
index 0000000..c637b9c
--- /dev/null
+++ b/71-202601-XA监管平台/doris数据同步/doris-data-import.sh
@@ -0,0 +1,634 @@
+#!/usr/bin/env bash
+#===============================================================================
+# Author: Smith Wang
+# Version: 1.0.0
+# License: MIT
+# Filename: doris_csv_stream_load.sh
+#
+# Description:
+# 1) Use wget to download one or multiple CSV files (resume + size verification)
+# 2) Import into Apache Doris via Stream Load API using curl
+#
+# Module dependencies:
+# - bash (>= 5.0)
+# - wget
+# - curl
+# - awk, sed, grep, stat, date, mktemp
+#
+# ShellCheck:
+# shellcheck -x doris_csv_stream_load.sh
+#===============================================================================
+
+set -euo pipefail
+IFS=$'\n\t'
+
+#===============================================================================
+# Global Constants
+#===============================================================================
+readonly SCRIPT_NAME="$(basename "$0")"
+readonly SCRIPT_VERSION="1.0.0"
+readonly DEFAULT_WORKDIR="./doris_csv_downloads"
+readonly DEFAULT_NATIONAL_DIR_URL="https://oss.demo.uavcmlc.com/cmlc-installation/doris/all"
+readonly DEFAULT_NATIONAL_COUNT="6" # suffix 0..5
+readonly DEFAULT_REGION_URL="https://oss.demo.uavcmlc.com/cmlc-installation/doris/all/xiongan.csv"
+
+# Doris defaults (override by args)
+readonly DEFAULT_DORIS_USER="root"
+readonly DEFAULT_DORIS_PASS="" # empty by default (root:)
+readonly DEFAULT_DORIS_BE_PORT="8040" # Stream Load port
+
+# wget/curl behavior
+readonly WGET_RETRIES="10"
+readonly WGET_TIMEOUT_SEC="30"
+readonly CURL_TIMEOUT_SEC="600" # per file; adjust if needed
+readonly LOCKFILE="/tmp/${SCRIPT_NAME}.lock"
+
+#===============================================================================
+# Runtime Config (set by args)
+#===============================================================================
+ACTION="all" # download|load|all
+WORKDIR="${DEFAULT_WORKDIR}"
+
+NATIONAL_DIR_URL="${DEFAULT_NATIONAL_DIR_URL}"
+NATIONAL_PREFIX="" # REQUIRED for national mode
+NATIONAL_COUNT="${DEFAULT_NATIONAL_COUNT}"
+
+REGION_URL="${DEFAULT_REGION_URL}"
+
+DORIS_BE_IP="" # REQUIRED
+DORIS_BE_PORT="${DEFAULT_DORIS_BE_PORT}"
+DORIS_USER="${DEFAULT_DORIS_USER}"
+DORIS_PASS="${DEFAULT_DORIS_PASS}"
+DORIS_DB="cmii"
+DORIS_TABLE="dwd_reg_grid_city_detail_dd"
+
+COLUMN_SEPARATOR=","
+
+# Derived
+DOWNLOAD_LIST_FILE=""
+STREAMLOAD_LOG_DIR=""
+
+#===============================================================================
+# ASCII Call Graph
+#===============================================================================
+# main
+# ├─ acquire_lock
+# ├─ parse_args
+# ├─ validate_env
+# ├─ prepare_workdir
+# ├─ build_download_list
+# ├─ run_downloads
+# │ ├─ get_remote_size_bytes
+# │ ├─ download_one
+# │ └─ verify_file_size
+# ├─ run_stream_load
+# │ ├─ stream_load_one
+# │ └─ parse_stream_load_response
+# └─ release_lock (trap)
+#===============================================================================
+
+#===============================================================================
+# Logging
+#===============================================================================
+LOG_LEVEL="INFO" # DEBUG|INFO|WARN|ERROR
+
+### Print log line with level
+# @param level string Log level
+# @param msg string Message
+# @return 0 Success
+log() {
+ local level="$1"
+ local msg="$2"
+ local ts
+ ts="$(date '+%Y-%m-%d %H:%M:%S')"
+ >&2 printf '%s [%s] %s: %s\n' "$ts" "$level" "$SCRIPT_NAME" "$msg"
+}
+
+### Debug log
+# @param msg string Message
+# @return 0 Success
+log_debug() { [[ "$LOG_LEVEL" == "DEBUG" ]] && log "DEBUG" "$1" || true; }
+
+### Info log
+# @param msg string Message
+# @return 0 Success
+log_info() { log "INFO" "$1"; }
+
+### Warn log
+# @param msg string Message
+# @return 0 Success
+log_warn() { log "WARN" "$1"; }
+
+### Error log
+# @param msg string Message
+# @return 0 Success
+log_error() { log "ERROR" "$1"; }
+
+#===============================================================================
+# Error / Cleanup
+#===============================================================================
+TMPDIR=""
+CLEANUP_FILES=()
+
+### Cleanup handler
+# @return 0 Always
+cleanup() {
+ local exit_code=$?
+ # > cleanup temp resources
+ if [[ -n "${TMPDIR}" && -d "${TMPDIR}" ]]; then
+ rm -rf "${TMPDIR}" || true
+ fi
+ # > release lock
+ release_lock || true
+
+ if [[ $exit_code -ne 0 ]]; then
+ log_error "Exiting with code ${exit_code}"
+ fi
+ exit "$exit_code"
+}
+
+trap cleanup EXIT INT TERM
+
+### Fail fast with an error message
+# @param msg string Error message
+# @return never returns; exits the script with status 1
+die() {
+  log_error "$1"
+  # > 'exit' rather than 'return': a plain 'return 1' relies on 'set -e'
+  # > to stop the script, which is suspended in condition contexts and
+  # > command substitutions, so failures could be silently ignored.
+  exit 1
+}
+
+#===============================================================================
+# Lock
+#===============================================================================
+### Acquire a simple lock to avoid concurrent runs
+# @require mkdir (atomic directory creation; no flock dependency)
+# @return 0 Success
+acquire_lock() {
+  # > atomic mkdir either creates the lock dir or fails if it already exists
+  if ! mkdir "${LOCKFILE}.d" 2>/dev/null; then
+    die "Another instance is running (lock exists: ${LOCKFILE}.d). Remove it if you're sure."
+  fi
+  log_debug "Lock acquired: ${LOCKFILE}.d"
+}
+
+### Release lock
+# @return 0 Success
+release_lock() {
+ if [[ -d "${LOCKFILE}.d" ]]; then
+ rmdir "${LOCKFILE}.d" 2>/dev/null || true
+ log_debug "Lock released: ${LOCKFILE}.d"
+ fi
+}
+
+#===============================================================================
+# Usage
+#===============================================================================
+### Print usage text to stdout
+# @return 0 Success
+usage() {
+  # > quoted 'EOF' delimiter: heredoc body is emitted literally (no expansion)
+  cat <<'EOF'
+Usage:
+  doris_csv_stream_load.sh [download|load|all] [options]
+
+Actions:
+  download        Only download CSVs (wget)
+  load            Only stream-load existing CSVs in workdir
+  all             Download then load (default)
+
+Options:
+  --workdir             Download directory (default: ./doris_csv_downloads)
+  --log-level
+
+  # National files (suffix 0..5 by default)
+  --national-dir-url    Base directory URL for national files
+                        default: https://oss.demo.uavcmlc.com/cmlc-installation/doris/all
+  --national-prefix     REQUIRED for national mode
+                        e.g. result_2aee9754dd304ca1-a0651901906f9bb4
+  --national-count      How many files, suffix 0..n-1 (default: 6)
+
+  # Optional single region file
+  --region-url          default: https://oss.demo.uavcmlc.com/cmlc-installation/doris/all/xiongan.csv
+  --no-region           Skip region file
+
+  # Doris stream load config
+  --doris-be-ip         REQUIRED
+  --doris-be-port       default: 8040
+  --doris-user          default: root
+  --doris-pass          default: empty
+  --db                  default: cmii
+  --table               default: dwd_reg_grid_city_detail_dd
+  --column-separator    default: ,
+
+Examples:
+  # 1) All national(0..5) + region file, download then load:
+  ./doris_csv_stream_load.sh all \
+    --national-prefix result_2aee9754dd304ca1-a0651901906f9bb4 \
+    --doris-be-ip 10.10.10.10
+
+  # 2) Download only:
+  ./doris_csv_stream_load.sh download \
+    --national-prefix result_xxx \
+    --doris-be-ip 10.10.10.10
+
+  # 3) Load only (assumes files already in workdir):
+  ./doris_csv_stream_load.sh load \
+    --national-prefix result_xxx \
+    --doris-be-ip 10.10.10.10
+
+EOF
+}
+
+#===============================================================================
+# Args Parsing
+#===============================================================================
+SKIP_REGION="false"
+
+### Parse CLI arguments into the global runtime config variables
+# @param args string[] Command line args ("$@" from main)
+# @return 0 Success (exits 0 on -h/--help)
+parse_args() {
+  # > optional leading positional action: download|load|all
+  if [[ $# -ge 1 ]]; then
+    case "$1" in
+      download|load|all) ACTION="$1"; shift ;;
+      -h|--help) usage; exit 0 ;;
+      *) : ;;   # > not an action word; leave it for the option loop below
+    esac
+  fi
+
+  # > remaining args are --key value pairs (flags shift 1, options shift 2)
+  while [[ $# -gt 0 ]]; do
+    case "$1" in
+      --workdir) WORKDIR="$2"; shift 2 ;;
+      --log-level) LOG_LEVEL="$2"; shift 2 ;;
+
+      --national-dir-url) NATIONAL_DIR_URL="$2"; shift 2 ;;
+      --national-prefix) NATIONAL_PREFIX="$2"; shift 2 ;;
+      --national-count) NATIONAL_COUNT="$2"; shift 2 ;;
+
+      --region-url) REGION_URL="$2"; shift 2 ;;
+      --no-region) SKIP_REGION="true"; shift 1 ;;
+
+      --doris-be-ip) DORIS_BE_IP="$2"; shift 2 ;;
+      --doris-be-port) DORIS_BE_PORT="$2"; shift 2 ;;
+      --doris-user) DORIS_USER="$2"; shift 2 ;;
+      --doris-pass) DORIS_PASS="$2"; shift 2 ;;
+      --db) DORIS_DB="$2"; shift 2 ;;
+      --table) DORIS_TABLE="$2"; shift 2 ;;
+      --column-separator) COLUMN_SEPARATOR="$2"; shift 2 ;;
+
+      -h|--help) usage; exit 0 ;;
+      *)
+        die "Unknown argument: $1"
+        ;;
+    esac
+  done
+}
+
+#===============================================================================
+# Validation / Environment
+#===============================================================================
+### Validate required tools and config
+# @require wget
+# @require curl
+# @require awk sed grep stat
+# @return 0 Success
+validate_env() {
+ command -v wget >/dev/null 2>&1 || die "wget not found"
+ command -v curl >/dev/null 2>&1 || die "curl not found"
+ command -v awk >/dev/null 2>&1 || die "awk not found"
+ command -v sed >/dev/null 2>&1 || die "sed not found"
+ command -v grep >/dev/null 2>&1 || die "grep not found"
+ command -v stat >/dev/null 2>&1 || die "stat not found"
+
+ [[ -n "${DORIS_BE_IP}" ]] || die "--doris-be-ip is required"
+ [[ -n "${NATIONAL_PREFIX}" ]] || die "--national-prefix is required (filename changes, must be provided)"
+ [[ "${NATIONAL_COUNT}" =~ ^[0-9]+$ ]] || die "--national-count must be an integer"
+}
+
+### Prepare working directory and temp (追加初始化日志目录)
+# @return 0 Success
+prepare_workdir() {
+ mkdir -p "${WORKDIR}"
+ TMPDIR="$(mktemp -d)"
+ DOWNLOAD_LIST_FILE="${TMPDIR}/download_list.txt"
+
+ # > stream load logs dir
+ STREAMLOAD_LOG_DIR="${WORKDIR}/_streamload_logs"
+ mkdir -p "${STREAMLOAD_LOG_DIR}"
+
+ log_debug "Workdir: ${WORKDIR}"
+ log_debug "StreamLoad log dir: ${STREAMLOAD_LOG_DIR}"
+}
+
+### Generate a local request id for tracing (timestamp__pid__basename)
+# @param csv_path string Local CSV file path
+# @return 0 Success (prints request id to stdout)
+gen_request_id() {
+  local src="$1"
+  local stamp clean_base
+  stamp="$(date '+%Y%m%d_%H%M%S')"
+  # > keep only filesystem-safe characters in the basename
+  clean_base="$(basename "${src}" | sed 's/[^a-zA-Z0-9._-]/_/g')"
+  printf '%s__%s__%s' "${stamp}" "$$" "${clean_base}"
+}
+
+### Extract a JSON string field value without jq (best-effort)
+# NOTE(review): naive regex parse; does not handle escaped quotes inside
+# the value — adequate for Doris stream-load responses, not general JSON.
+# @param json_file string JSON file path
+# @param field_name string Field name, e.g. Status
+# @return 0 Success (prints value or empty)
+json_get_string() {
+  local file="$1"
+  local field="$2"
+  # > grab the first occurrence of "Field":"value", then strip the wrapper
+  local raw
+  raw="$(grep -Eo "\"${field}\"[[:space:]]*:[[:space:]]*\"[^\"]*\"" "${file}" | head -n1 || true)"
+  printf '%s' "${raw}" | sed -E "s/.*\"${field}\"[[:space:]]*:[[:space:]]*\"([^\"]*)\".*/\1/"
+}
+
+### Extract a JSON numeric field value without jq (best-effort)
+# @param json_file string JSON file path
+# @param field_name string Field name, e.g. TxnId
+# @return 0 Success (prints value or empty)
+json_get_number() {
+  local file="$1"
+  local field="$2"
+  # > grab the first occurrence of "Field":12345, then strip the wrapper
+  local raw
+  raw="$(grep -Eo "\"${field}\"[[:space:]]*:[[:space:]]*[0-9]+" "${file}" | head -n1 || true)"
+  printf '%s' "${raw}" | sed -E "s/.*\"${field}\"[[:space:]]*:[[:space:]]*([0-9]+).*/\1/"
+}
+
+
+#===============================================================================
+# Download List Builder
+#===============================================================================
+### Build download URL list into DOWNLOAD_LIST_FILE (one URL per line)
+# National files follow <dir>/<prefix>_<i>.csv for i in 0..count-1; the
+# optional region file is appended last unless --no-region was given.
+# @return 0 Success
+build_download_list() {
+  # > truncate any previous list
+  : > "${DOWNLOAD_LIST_FILE}"
+
+  # > National files: prefix_0..prefix_(count-1)
+  # > (single definition; an earlier duplicate had a fatal typo
+  # > 'NATIONA L_COUNT' in the loop condition)
+  local i
+  for ((i = 0; i < NATIONAL_COUNT; i++)); do
+    printf '%s/%s_%s.csv\n' "${NATIONAL_DIR_URL}" "${NATIONAL_PREFIX}" "${i}" >> "${DOWNLOAD_LIST_FILE}"
+  done
+
+  # > Optional region file
+  if [[ "${SKIP_REGION}" != "true" ]]; then
+    printf '%s\n' "${REGION_URL}" >> "${DOWNLOAD_LIST_FILE}"
+  fi
+
+  log_info "Download list prepared: $(wc -l < "${DOWNLOAD_LIST_FILE}") file(s)"
+}
+
+#===============================================================================
+# Download / Verify
+#===============================================================================
+### Get remote content length (bytes) via wget --spider
+# @param url string Remote URL
+# @return 0 Success (prints size in bytes, or empty if unknown)
+get_remote_size_bytes() {
+  local url="$1"
+  # > --spider fetches headers only; wget prints them to stderr, hence 2>&1
+  # > Some servers may not provide Content-Length; handle gracefully.
+  local headers
+  headers="$(wget --spider --server-response --timeout="${WGET_TIMEOUT_SEC}" --tries=2 "${url}" 2>&1 || true)"
+
+  # > Match the header case-insensitively and independent of leading
+  # > indentation (wget indents response headers, so the previous
+  # > fixed-prefix awk match on ' content-length' never fired).
+  local size
+  size="$(printf '%s\n' "${headers}" \
+    | grep -i 'content-length:' \
+    | tail -n 1 \
+    | awk -F': *' '{gsub("\r", "", $2); print $2}' \
+    || true)"
+
+  if [[ "${size}" =~ ^[0-9]+$ ]]; then
+    printf '%s' "${size}"
+  else
+    printf '%s' ""
+  fi
+}
+
+### Get local file size in bytes
+# @param file_path string Local file path
+# @return 0 Success (prints size)
+get_local_size_bytes() {
+  # > GNU stat: '%s' prints total size in bytes
+  stat -c '%s' "$1"
+}
+
+### Verify local file size equals remote (if remote size known)
+# @param file_path string Local file path
+# @param remote_size string Remote size bytes (may be empty)
+# @return 0 Success
+verify_file_size() {
+  local path="$1"
+  local expected="$2"
+
+  [[ -f "${path}" ]] || die "File not found: ${path}"
+
+  local actual
+  actual="$(get_local_size_bytes "${path}")"
+
+  if [[ -z "${expected}" ]]; then
+    # > no Content-Length to compare against; at least require non-empty
+    [[ "${actual}" -gt 0 ]] || die "Downloaded file is empty: ${path}"
+    log_warn "Remote Content-Length missing; only checked non-empty: ${path} (${actual} bytes)"
+    return 0
+  fi
+
+  [[ "${actual}" == "${expected}" ]] \
+    || die "Size mismatch for ${path}: local=${actual}, remote=${expected}"
+  log_info "Verified: ${path} (size=${actual} bytes)"
+}
+
+### Download a single URL into workdir with resume + retries
+# @param url string Remote URL
+# @return 0 Success
+download_one() {
+  local url="$1"
+  local filename
+  filename="$(basename "${url}")"
+  # > deterministic local path derived from the URL basename
+  # > (was a corrupted '$(unknown)' placeholder, which would have run a
+  # > nonexistent command and produced an empty filename)
+  local out_path="${WORKDIR}/${filename}"
+
+  log_info "Downloading: ${url}"
+  local remote_size
+  remote_size="$(get_remote_size_bytes "${url}")"
+  if [[ -n "${remote_size}" ]]; then
+    log_debug "Remote size: ${remote_size} bytes for ${filename}"
+  else
+    log_warn "Remote size unknown (no Content-Length): ${url}"
+  fi
+
+  # > Use --continue for resume, --tries for retries, --timeout to avoid hanging
+  # > Use --output-document to ensure deterministic output path
+  wget --continue \
+    --tries="${WGET_RETRIES}" \
+    --timeout="${WGET_TIMEOUT_SEC}" \
+    --output-document="${out_path}" \
+    "${url}"
+
+  # > Ensure fully downloaded (size check against Content-Length)
+  verify_file_size "${out_path}" "${remote_size}"
+}
+
+### Download every URL from the list file (all must succeed)
+# @param list_file string File containing URLs, one per line
+# @return 0 Success
+run_downloads() {
+  local list="$1"
+  [[ -f "${list}" ]] || die "Download list file not found: ${list}"
+
+  # > one URL per line; blank lines are skipped
+  local url
+  while IFS= read -r url; do
+    if [[ -n "${url}" ]]; then
+      download_one "${url}"
+    fi
+  done < "${list}"
+
+  log_info "All downloads completed successfully."
+}
+
+### Parse Doris stream load response and decide success
+# Logs a grep-friendly one-line summary (status/txn/label/row counts).
+# @param resp_file string Response file path
+# @return 0 Success, 1 Failure
+parse_stream_load_response() {
+  local resp_file="$1"
+  [[ -f "${resp_file}" ]] || die "Response file not found: ${resp_file}"
+
+  # > pull the interesting fields out of the JSON response
+  local status message txn_id label load_rows filtered_rows load_bytes
+  status="$(json_get_string "${resp_file}" "Status")"
+  message="$(json_get_string "${resp_file}" "Message")"
+  txn_id="$(json_get_number "${resp_file}" "TxnId")"
+  label="$(json_get_string "${resp_file}" "Label")"
+  load_rows="$(json_get_number "${resp_file}" "NumberLoadedRows")"
+  filtered_rows="$(json_get_number "${resp_file}" "NumberFilteredRows")"
+  load_bytes="$(json_get_number "${resp_file}" "LoadBytes")"
+
+  # > structured summary (easy to grep)
+  log_info "StreamLoadResp status=${status:-N/A} txn_id=${txn_id:-N/A} label=${label:-N/A} loaded=${load_rows:-N/A} filtered=${filtered_rows:-N/A} bytes=${load_bytes:-N/A}"
+
+  if [[ "${status}" != "Success" ]]; then
+    log_error "Stream Load Failed. Status=${status:-Unknown} Message=${message:-N/A}"
+    log_error "Full response saved at: ${resp_file}"
+    return 1
+  fi
+
+  log_info "Stream Load Success. Message=${message:-N/A}"
+  return 0
+}
+
+
+### Stream load a single CSV file to Doris
+# (Changed: no "label" header is sent, per requirement; a local request_id
+# is used for tracing instead, so Doris generates its own label per load.)
+# @param csv_path string Local CSV file path
+# @return 0 Success
+stream_load_one() {
+  local csv_path="$1"
+  [[ -f "${csv_path}" ]] || die "CSV not found: ${csv_path}"
+
+  # > BE Stream Load endpoint: /api/<db>/<table>/_stream_load
+  local url="http://${DORIS_BE_IP}:${DORIS_BE_PORT}/api/${DORIS_DB}/${DORIS_TABLE}/_stream_load"
+
+  # > local trace id to correlate logs/response/files
+  local request_id
+  request_id="$(gen_request_id "${csv_path}")"
+
+  # > persist full response for tracing
+  local resp_file="${STREAMLOAD_LOG_DIR}/${request_id}.json"
+
+  log_info "Stream loading: ${csv_path} -> ${url} (request_id=${request_id})"
+  log_info "Response will be saved: ${resp_file}"
+
+  # > NOTE: do NOT set label header (per requirement)
+  # > --location-trusted keeps sending credentials across redirects
+  # > (Doris may redirect the load between FE/BE nodes)
+  curl --location-trusted \
+    --silent --show-error --fail-with-body \
+    --max-time "${CURL_TIMEOUT_SEC}" \
+    -u "${DORIS_USER}:${DORIS_PASS}" \
+    -H "Expect:100-continue" \
+    -H "column_separator:${COLUMN_SEPARATOR}" \
+    -T "${csv_path}" \
+    -X PUT \
+    "${url}" > "${resp_file}"
+
+  parse_stream_load_response "${resp_file}"
+}
+### Stream load all CSVs named by the download list, in list order
+# @param list_file string Download list file (to know exact filenames)
+# @return 0 Success
+run_stream_load() {
+  local list_file="$1"
+  [[ -f "${list_file}" ]] || die "Download list file not found: ${list_file}"
+
+  # > Ensure all expected files exist before loading anything
+  # > (paths were corrupted '$(unknown)' placeholders; use the URL basename)
+  local url filename csv_path
+  while IFS= read -r url; do
+    [[ -n "${url}" ]] || continue
+    filename="$(basename "${url}")"
+    csv_path="${WORKDIR}/${filename}"
+    [[ -f "${csv_path}" ]] || die "Expected CSV missing (download not complete?): ${csv_path}"
+  done < "${list_file}"
+  log_info "All expected CSV files exist. Starting Stream Load..."
+
+  # > Load in the same order as the list
+  while IFS= read -r url; do
+    [[ -n "${url}" ]] || continue
+    filename="$(basename "${url}")"
+    stream_load_one "${WORKDIR}/${filename}"
+  done < "${list_file}"
+
+  log_info "All Stream Load operations finished."
+}
+
+#===============================================================================
+# Main
+#===============================================================================
+### Entry point: lock, parse args, prepare, then dispatch by ACTION
+# @param args string[] Command line args
+# @return 0 Success
+main() {
+  acquire_lock
+  parse_args "$@"
+  validate_env
+  prepare_workdir
+  build_download_list
+
+  case "${ACTION}" in
+    download)
+      # > Download only
+      run_downloads "${DOWNLOAD_LIST_FILE}" ;;
+    load)
+      # > Load only (expects files already present)
+      run_stream_load "${DOWNLOAD_LIST_FILE}" ;;
+    all)
+      # > Download then load
+      run_downloads "${DOWNLOAD_LIST_FILE}"
+      run_stream_load "${DOWNLOAD_LIST_FILE}" ;;
+    *)
+      die "Invalid action: ${ACTION}" ;;
+  esac
+
+  log_info "Done. (version=${SCRIPT_VERSION})"
+}
+
+main "$@"
diff --git a/71-202601-XA监管平台/doris数据同步/同步脚本.md b/71-202601-XA监管平台/doris数据同步/同步脚本.md
new file mode 100644
index 0000000..572c93a
--- /dev/null
+++ b/71-202601-XA监管平台/doris数据同步/同步脚本.md
@@ -0,0 +1,37 @@
+请以Bash Shell脚本高级开发工程师的身份,严格遵循以下编程规范实现指定功能:
+
+1. 代码结构规范
+- 符合POSIX标准与Bash最佳实践(v5.0+)
+- 实现清晰的模块划分和函数封装
+- 采用防御性编程策略处理异常情况
+- 包含完善的错误处理机制(trap、set -euo pipefail)
+2. 函数设计标准
+- 函数声明需包含: 功能描述段(使用###注释块) 参数说明:@param <变量名> <数据类型> <用途说明> 返回值说明:@return <退出码> <状态描述> 环境依赖:@require <依赖项>
+- 函数参数命名采用snake_case格式,体现语义化特征
+3. 文档规范
+- 主脚本头部包含: 元数据声明(作者、版本、许可证) 全局常量定义区 模块依赖说明
+- 关键算法步骤添加行内注释(# > 开头)
+- 维护完整的函数调用关系图(使用ASCII流程图)
+4. 质量保障
+- 通过ShellCheck进行静态检测
+- 统一的日志函数,实现详细的日志分级输出(DEBUG/INFO/WARN/ERROR)
+
+
+
+
+1、在一台能访问doris的服务器下载csv文件
+2、修改以下指令的变量文件名、doris ip和port,执行就导入完成了
+
+全国数据共6个文件,5.6G,后缀从0到5
+csv文件地址:https://oss.demo.uavcmlc.com/cmlc-installation/doris/all/result_2aee9754dd304ca1-a0651901906f9bb4_0.csv
+
+单独地域的文件 https://oss.demo.uavcmlc.com/cmlc-installation/doris/all/xiongan.csv
+
+
+导入指令:curl --location-trusted -u root: -H "Expect:100-continue" -H "column_separator:," -T ${table_name}.csv -XPUT http://${doris_be_ip}:${doris_be_8040_port}/api/cmii/dwd_reg_grid_city_detail_dd/_stream_load
+
+使用WGET下载,需要确保所有文件全部下载完成才能进行导入
+result_2aee9754dd304ca1-a0651901906f9bb4_0.csv 此名称可能变化,需要作为变量提出
+
+DORIS的地址、端口等需要作为公共变量提出
+
diff --git a/71-202601-XA监管平台/hosts.txt b/71-202601-XA监管平台/hosts.txt
new file mode 100644
index 0000000..1e105a5
--- /dev/null
+++ b/71-202601-XA监管平台/hosts.txt
@@ -0,0 +1,8 @@
+10.22.57.8 master-10-22-57-8
+10.22.57.5 worker-1-10-22-57-5
+10.22.57.6 worker-2-10-22-57-6
+10.22.57.7 worker-3-10-22-57-7
+10.22.57.3 worker-4-10-22-57-3
+10.22.57.4 worker-5-10-22-57-4
+
+hostnamectl set-hostname
\ No newline at end of file
diff --git a/71-202601-XA监管平台/k8s-admin-token.txt b/71-202601-XA监管平台/k8s-admin-token.txt
new file mode 100644
index 0000000..7f4f968
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-admin-token.txt
@@ -0,0 +1 @@
+eyJhbGciOiJSUzI1NiIsImtpZCI6IlBtUkhncHJ6T0ZUYnItM0VnMXpVUlZCWllOSVZWZ0Y0WEN3Sk5KUkoxY2MifQ.eyJhdWQiOlsidW5rbm93biJdLCJleHAiOjE4NjMzOTczMDYsImlhdCI6MTc2ODc4OTMwNiwiaXNzIjoicmtlIiwianRpIjoiOWQxNjM2MTktZDhjNi00MjQzLTgxN2MtMGYzOWYxYzFlNGExIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiNmUyOTQyM2QtZTYxNC00ZDdhLWI0MjAtNTVkZGQwMjZhYWQ4In19LCJuYmYiOjE3Njg3ODkzMDYsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.FSTYBTJhdrPz_dfcok_QTJCCX_u9k2iCmzkMwrE2NaxyEGFzbSoa8rpqN1yX6fO1GPfJX1UwAqsXqVXSEFOyCbdKt702zWonkUhIqJg7E48E8807Ta6Dc1QGNH4UHxuheFCPlqf9ZoTVF9lV0oWFHdxT-b5Z_ZuTshJ7pHE7AuZgOtLvzdpp4qU7CbMPWfkPUMh85fE9kzOCpbJsrN1ccnTMfjAJ8u1w_rq9Lzk2IqiuaiwSD95dNJAjKWdt8qgtDUbAbrRja_o5BdPzPuaEHRSI-aNNMgguxovvmGs7GX0mYqLlsU-SPRlkDU02Td_7Av48Ckh1IBvkHIKoAbhZgw
\ No newline at end of file
diff --git a/71-202601-XA监管平台/k8s-yaml/helm-minio.yaml b/71-202601-XA监管平台/k8s-yaml/helm-minio.yaml
new file mode 100644
index 0000000..98e02fe
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/helm-minio.yaml
@@ -0,0 +1,79 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ namespace: xa-dcity-uas-260116
+ name: helm-minio
+spec:
+ serviceName: helm-minio
+ replicas: 1
+ selector:
+ matchLabels:
+ app: helm-minio
+ template:
+ metadata:
+ labels:
+ app: helm-minio
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: minio-deploy
+ operator: In
+ values:
+ - "true"
+ containers:
+ - name: minio
+ image: 10.22.57.8:8033/cmii/minio:RELEASE.2023-06-02T23-17-26Z
+ command: ["/bin/sh", "-c"]
+ args:
+ - minio server /data --console-address ":9001"
+ ports:
+ - containerPort: 9000
+ name: api
+ - containerPort: 9001
+ name: console
+ env:
+ - name: MINIO_ACCESS_KEY
+ value: "cmii"
+ - name: MINIO_SECRET_KEY
+ value: "B#923fC7mk"
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ volumes:
+ - name: data
+# persistentVolumeClaim:
+# claimName: helm-minio
+ hostPath:
+ path: /var/lib/docker/minio-pv/
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-minio
+ namespace: xa-dcity-uas-260116
+spec:
+ selector:
+ app: helm-minio
+ ports:
+ - name: api
+ port: 9000
+ targetPort: 9000
+ nodePort: 39000
+ - name: console
+ port: 9001
+ targetPort: 9001
+ nodePort: 39001
+ type: NodePort
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-backend.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-backend.yaml
new file mode 100644
index 0000000..58f0a28
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-backend.yaml
@@ -0,0 +1,1575 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-iot-dispatcher
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-iot-dispatcher
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uav-iot-dispatcher
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-iot-dispatcher
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - demo
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-iot-dispatcher
+ image: 10.22.57.8:8033/cmii/cmii-uav-iot-dispatcher:2.2.0-pro-20251104
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xa-dcity-uas-260116
+ - name: APPLICATION_NAME
+ value: cmii-uav-iot-dispatcher
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.2
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.2
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 10.22.57.8:8033/cmii/cmii-uav-iot-dispatcher:2.2.0-pro-20251104
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-iot-dispatcher
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-iot-dispatcher
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-iot-dispatcher
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-iot-dispatcher
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xa-dcity-uas-260116/cmii-uav-iot-dispatcher
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-iot-dispatcher
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-iot-dispatcher
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uav-iot-dispatcher
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-sense-adapter
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-sense-adapter
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uav-sense-adapter
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-sense-adapter
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - demo
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-sense-adapter
+ image: 10.22.57.8:8033/cmii/cmii-uav-sense-adapter:2.2.0-pro-20251031
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xa-dcity-uas-260116
+ - name: APPLICATION_NAME
+ value: cmii-uav-sense-adapter
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.2
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.2
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 10.22.57.8:8033/cmii/cmii-uav-sense-adapter:2.2.0-pro-20251031
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-sense-adapter
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-sense-adapter
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-sense-adapter
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-sense-adapter
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xa-dcity-uas-260116/cmii-uav-sense-adapter
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-sense-adapter
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-sense-adapter
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uav-sense-adapter
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cmii-live-oerator  # NOTE(review): "oerator" looks like a typo for "operator"; the image tag, labels and Service share the same spelling — confirm the registry image name before renaming anywhere
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-live-oerator
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-live-oerator
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-live-oerator
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - demo
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-live-oerator
+ image: 10.22.57.8:8033/cmii/cmii-live-oerator:5.2.0
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xa-dcity-uas-260116
+ - name: APPLICATION_NAME
+ value: cmii-live-oerator
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.2
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.2
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 10.22.57.8:8033/cmii/cmii-live-oerator:5.2.0
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-live-oerator
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-live-oerator
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-live-oerator
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-live-oerator
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xa-dcity-uas-260116/cmii-live-oerator
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-live-oerator
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-live-oerator
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-live-oerator
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uas-gateway
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-gateway
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+  replicas: 0  # NOTE(review): scaled to zero — cmii-uas-gateway will run no pods; confirm this is intentional for this environment
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uas-gateway
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-gateway
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - demo
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uas-gateway
+ image: 10.22.57.8:8033/cmii/cmii-uas-gateway:2.2.0-pro-20251031
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xa-dcity-uas-260116
+ - name: APPLICATION_NAME
+ value: cmii-uas-gateway
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.2
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.2
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 10.22.57.8:8033/cmii/cmii-uas-gateway:2.2.0-pro-20251031
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-gateway
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-gateway
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-gateway
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-gateway
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xa-dcity-uas-260116/cmii-uas-gateway
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uas-gateway
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-gateway
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uas-gateway
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uas-perception-live
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-perception-live
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uas-perception-live
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-perception-live
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - demo
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uas-perception-live
+ image: 10.22.57.8:8033/cmii/cmii-uas-perception-live:2.2.0-pro-20251031
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xa-dcity-uas-260116
+ - name: APPLICATION_NAME
+ value: cmii-uas-perception-live
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.2
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.2
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 10.22.57.8:8033/cmii/cmii-uas-perception-live:2.2.0-pro-20251031
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-perception-live
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-perception-live
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-perception-live
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-perception-live
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xa-dcity-uas-260116/cmii-uas-perception-live
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uas-perception-live
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-perception-live
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uas-perception-live
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-material-warehouse
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-material-warehouse
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uav-material-warehouse
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-material-warehouse
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - demo
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-material-warehouse
+ image: 10.22.57.8:8033/cmii/cmii-uav-material-warehouse:2.2.0-pro-20251104
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xa-dcity-uas-260116
+ - name: APPLICATION_NAME
+ value: cmii-uav-material-warehouse
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.2
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.2
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 10.22.57.8:8033/cmii/cmii-uav-material-warehouse:2.2.0-pro-20251104
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-material-warehouse
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-material-warehouse
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-material-warehouse
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-material-warehouse
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xa-dcity-uas-260116/cmii-uav-material-warehouse
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-material-warehouse
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-material-warehouse
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uav-material-warehouse
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uavms-pyfusion
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uavms-pyfusion
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uavms-pyfusion
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uavms-pyfusion
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - demo
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uavms-pyfusion
+ image: 10.22.57.8:8033/cmii/cmii-uavms-pyfusion:6.3.6
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xa-dcity-uas-260116
+ - name: APPLICATION_NAME
+ value: cmii-uavms-pyfusion
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.2
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.2
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 10.22.57.8:8033/cmii/cmii-uavms-pyfusion:6.3.6
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uavms-pyfusion
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uavms-pyfusion
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uavms-pyfusion
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uavms-pyfusion
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xa-dcity-uas-260116/cmii-uavms-pyfusion
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uavms-pyfusion
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uavms-pyfusion
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uavms-pyfusion
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-inference-hub
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-inference-hub
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-inference-hub
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-inference-hub
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - demo
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-inference-hub
+ image: 10.22.57.8:8033/cmii/cmii-inference-hub:2.2.0-pro-20251031
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xa-dcity-uas-260116
+ - name: APPLICATION_NAME
+ value: cmii-inference-hub
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.2
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.2
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 10.22.57.8:8033/cmii/cmii-inference-hub:2.2.0-pro-20251031
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-inference-hub
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-inference-hub
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-inference-hub
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-inference-hub
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xa-dcity-uas-260116/cmii-inference-hub
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-inference-hub
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-inference-hub
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-inference-hub
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uas-lifecycle
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+  replicas: 0  # NOTE(review): scaled to zero — cmii-uas-lifecycle will run no pods; confirm this is intentional for this environment
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - demo
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uas-lifecycle
+ image: 10.22.57.8:8033/cmii/cmii-uas-lifecycle:2.2.0-pro-20251120
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xa-dcity-uas-260116
+ - name: APPLICATION_NAME
+ value: cmii-uas-lifecycle
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.2
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.2
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 10.22.57.8:8033/cmii/cmii-uas-lifecycle:2.2.0-pro-20251120
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xa-dcity-uas-260116/cmii-uas-lifecycle
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uas-lifecycle
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-configmap.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-configmap.yaml
new file mode 100644
index 0000000..1331c1c
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-configmap.yaml
@@ -0,0 +1,672 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-seniclive
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "seniclive",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qinghaitourism
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "qinghaitourism",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-classification
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "classification",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-renyike
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "renyike",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-iot
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "iot",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervision
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "supervision",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-emergency
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "emergency",
+ AppClientId: "APP_aGsTAY1uMZrpKdfk"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-flight-control
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "flight-control",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-oms
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "oms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-splice
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "splice",
+ AppClientId: "APP_zE0M3sTRXrCIJS8Y"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qingdao
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "qingdao",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uas
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "uas",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dispatchh5
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "dispatchh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-scanner
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "scanner",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervisionh5
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "supervisionh5",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-pangu
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-ai-brain
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "ai-brain",
+ AppClientId: "APP_rafnuCAmBESIVYMH"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-detection
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "detection",
+ AppClientId: "APP_FDHW2VLVDWPnnOCy"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-logistics
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "logistics",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-threedsimulation
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "threedsimulation",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mianyangbackend
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "mianyangbackend",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dikongzhixingh5
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "dikongzhixingh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-visualization
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "visualization",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smsecret
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "smsecret",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uavmsmanager
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "uavmsmanager",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-awareness
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "awareness",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-media
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "media",
+ AppClientId: "APP_4AU8lbifESQO4FD6"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-securityh5
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "securityh5",
+ AppClientId: "APP_N3ImO0Ubfu9peRHD"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-jiangsuwenlv
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "jiangsuwenlv",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hyper
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "hyper",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-blockchain
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "blockchain",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-armypeople
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "armypeople",
+ AppClientId: "APP_UIegse6Lfou9pO1U"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-open
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "open",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-security
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "security",
+ AppClientId: "APP_JUSEMc7afyWXxvE7"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-share
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "share",
+ AppClientId: "APP_4lVSVI0ZGxTssir8"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-traffic
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "traffic",
+ AppClientId: "APP_Jc8i2wOQ1t73QEJS"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hljtt
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "hljtt",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uas
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "uas",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-pilot2cloud
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "pilot2cloud",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-base
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "base",
+ AppClientId: "APP_9LY41OaKSqk2btY0"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-cmsportal
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "cmsportal",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mws
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "mws",
+ AppClientId: "APP_uKniXPELlRERBBwK"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uasms
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "uasms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uasms
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "uasms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smauth
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "smauth",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-secenter
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "secenter",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-eventsh5
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "eventsh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-multiterminal
+ namespace: xa-dcity-uas-260116
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "144.7.97.167:8088",
+ ApplicationShortName: "multiterminal",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-dashboard.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-dashboard.yaml
new file mode 100644
index 0000000..82d3fbc
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-dashboard.yaml
@@ -0,0 +1,315 @@
+---
+# ------------------- Dashboard Namespace ------------------- #
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: kubernetes-dashboard
+
+---
+# ------------------- Service Account ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Service (NodePort 39999) ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ type: NodePort
+ ports:
+ - port: 443
+ targetPort: 8443
+ nodePort: 39999
+ selector:
+ k8s-app: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Secrets ------------------- #
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-certs
+ namespace: kubernetes-dashboard
+type: Opaque
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-csrf
+ namespace: kubernetes-dashboard
+type: Opaque
+data:
+ csrf: ""
+
+---
+# ------------------- Dashboard Role (FIXED) ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+rules:
+ # [修复] 允许创建 Secrets,解决 panic 问题
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["create"]
+ # 允许对特定 Secrets 进行操作
+ - apiGroups: [""]
+ resources: ["secrets"]
+ resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
+ verbs: ["get", "update", "delete"]
+ # ConfigMaps 权限
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ resourceNames: ["kubernetes-dashboard-settings"]
+ verbs: ["get", "update"]
+ # Metrics 权限
+ - apiGroups: [""]
+ resources: ["services"]
+ resourceNames: ["heapster", "dashboard-metrics-scraper"]
+ verbs: ["proxy"]
+ - apiGroups: [""]
+ resources: ["services/proxy"]
+ resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
+ verbs: ["get"]
+
+---
+# ------------------- Dashboard RoleBinding ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: kubernetes-dashboard-minimal
+subjects:
+ - kind: ServiceAccount
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: kubernetes-dashboard
+ template:
+ metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ spec:
+ containers:
+ - name: kubernetes-dashboard
+ image: 10.22.57.8:8033/cmii/dashboard:v2.7.0
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8443
+ protocol: TCP
+ args:
+ - --auto-generate-certificates
+ - --namespace=kubernetes-dashboard
+ volumeMounts:
+ - name: kubernetes-dashboard-certs
+ mountPath: /certs
+ - mountPath: /tmp
+ name: tmp-volume
+ livenessProbe:
+ httpGet:
+ scheme: HTTPS
+ path: /
+ port: 8443
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ volumes:
+ - name: kubernetes-dashboard-certs
+ secret:
+ secretName: kubernetes-dashboard-certs
+ - name: tmp-volume
+ emptyDir: {}
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ------------------- Metrics Scraper Service ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ ports:
+ - port: 8000
+ targetPort: 8000
+ selector:
+ k8s-app: dashboard-metrics-scraper
+
+---
+# ------------------- Metrics Scraper Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: dashboard-metrics-scraper
+ template:
+ metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ annotations:
+ seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
+ spec:
+ containers:
+ - name: dashboard-metrics-scraper
+ image: 10.22.57.8:8033/cmii/metrics-scraper:v1.0.8
+ ports:
+ - containerPort: 8000
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /
+ port: 8000
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp-volume
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ volumes:
+ - name: tmp-volume
+ emptyDir: {}
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ==================================================================
+# 自定义用户配置部分 (ADMIN & READ-ONLY)
+# ==================================================================
+
+# ------------------- 1. Admin User (全部权限) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: admin-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: admin-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ name: admin-user
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- 2. Read-Only User (只读+看日志) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: read-only-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: dashboard-view-with-logs
+rules:
+ - apiGroups: [""]
+ resources: ["configmaps", "endpoints", "persistentvolumeclaims", "pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "nodes", "persistentvolumeclaims", "persistentvolumes", "namespaces"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["pods/log"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["apps"]
+ resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["batch"]
+ resources: ["cronjobs", "jobs"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["ingresses", "networkpolicies"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["events.k8s.io"]
+ resources: ["events"]
+ verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: read-only-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: dashboard-view-with-logs
+subjects:
+ - kind: ServiceAccount
+ name: read-only-user
+ namespace: kubernetes-dashboard
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-emqx-5.8.8-OK.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-emqx-5.8.8-OK.yaml
new file mode 100644
index 0000000..46aea3e
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-emqx-5.8.8-OK.yaml
@@ -0,0 +1,639 @@
+---
+# ============== Secret - 密码管理 ==============
+apiVersion: v1
+kind: Secret
+metadata:
+ name: emqx-credentials
+ namespace: xa-dcity-uas-260116
+type: Opaque
+stringData:
+ # Dashboard管理员密码
+ dashboard-admin-password: "odD8#Ve7.B"
+ # MQTT用户密码
+ mqtt-admin-password: "odD8#Ve7.B"
+
+---
+# ============== ServiceAccount ==============
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-emqxs
+ namespace: xa-dcity-uas-260116
+
+---
+# ============== Role - RBAC ==============
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: helm-emqxs
+ namespace: xa-dcity-uas-260116
+rules:
+ - apiGroups: [""]
+ resources:
+ - endpoints
+ - pods
+ verbs:
+ - get
+ - watch
+ - list
+
+---
+# ============== RoleBinding ==============
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: helm-emqxs
+ namespace: xa-dcity-uas-260116
+subjects:
+ - kind: ServiceAccount
+ name: helm-emqxs
+ namespace: xa-dcity-uas-260116
+roleRef:
+ kind: Role
+ name: helm-emqxs
+ apiGroup: rbac.authorization.k8s.io
+
+---
+# ============== ConfigMap - Bootstrap配置文件 ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-bootstrap-config
+ namespace: xa-dcity-uas-260116
+data:
+ # 主配置文件 - 覆盖默认配置
+ emqx.conf: |
+ # 节点配置
+ node {
+ name = "emqx@${POD_NAME}.helm-emqxs-headless.xa-dcity-uas-260116.svc.cluster.local"
+ cookie = "emqx-cluster-cookie-secret"
+ data_dir = "/opt/emqx/data"
+ }
+
+ # 集群配置
+ cluster {
+ name = emqxcl
+ # 单节点 建议为 manual 多节点为k8s
+ discovery_strategy = manual
+ k8s {
+ apiserver = "https://kubernetes.default.svc.cluster.local:443"
+ service_name = "helm-emqxs-headless"
+ # 这里可以改为 hostname
+ address_type = dns
+ namespace = "xa-dcity-uas-260116"
+ suffix = "svc.cluster.local"
+ }
+ }
+
+ # 日志配置
+ log {
+ console {
+ enable = true
+ level = info
+ }
+ file {
+ enable = true
+ level = warning
+ path = "/opt/emqx/log"
+ }
+ }
+
+ # Dashboard配置
+ dashboard {
+ listeners.http {
+ bind = "0.0.0.0:18083"
+ }
+ default_username = "admin"
+ default_password = "public"
+ }
+
+ # 监听器配置
+ listeners.tcp.default {
+ bind = "0.0.0.0:1883"
+ max_connections = 1024000
+ }
+
+ listeners.ws.default {
+ bind = "0.0.0.0:8083"
+ max_connections = 1024000
+ websocket.mqtt_path = "/mqtt"
+ }
+
+ listeners.ssl.default {
+ bind = "0.0.0.0:8883"
+ max_connections = 512000
+ }
+
+ # 认证配置 - 使用内置数据库
+ authentication = [
+ {
+ mechanism = password_based
+ backend = built_in_database
+ user_id_type = username
+ password_hash_algorithm {
+ name = sha256
+ salt_position = suffix
+ }
+ # Bootstrap文件路径 - 用于初始化用户
+ bootstrap_file = "/opt/emqx/data/bootstrap_users.json"
+ bootstrap_type = plain
+ }
+ ]
+
+ # 授权配置
+ authorization {
+ no_match = deny
+ deny_action = disconnect
+
+ sources = [
+ {
+ type = built_in_database
+ enable = true
+ }
+ ]
+ }
+
+ # MQTT协议配置
+ mqtt {
+ max_packet_size = "1MB"
+ max_clientid_len = 65535
+ max_topic_levels = 128
+ max_qos_allowed = 2
+ max_topic_alias = 65535
+ retain_available = true
+ wildcard_subscription = true
+ shared_subscription = true
+ }
+
+---
+# ============== ConfigMap - Users & ACL (严格 JSON 格式) ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-bootstrap-users
+ namespace: xa-dcity-uas-260116
+data:
+ bootstrap_users.json: |
+ [
+ { "user_id": "admin", "password": "odD8#Ve7.B", "is_superuser": true },
+ { "user_id": "cmlc", "password": "odD8#Ve7.B", "is_superuser": false }
+ ]
+
+ # 【修改点】既然有jq,这里使用标准的 JSON 数组格式,最不容易出错
+ bootstrap_acl.json: |
+ [
+ {
+ "username": "admin",
+ "rules": [
+ {"action": "all", "permission": "allow", "topic": "#"}
+ ]
+ },
+ {
+ "username": "cmlc",
+ "rules": [
+ {"action": "publish", "permission": "allow", "topic": "#"},
+ {"action": "subscribe", "permission": "allow", "topic": "#"}
+ ]
+ }
+ ]
+
+---
+# ============== ConfigMap - 初始化脚本 (修正版) ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-init-dashboard
+ namespace: xa-dcity-uas-260116
+data:
+ init-dashboard.sh: |
+ #!/bin/bash
+ set -e
+
+ DASHBOARD_USER="admin"
+ DASHBOARD_PASS="${DASHBOARD_ADMIN_PASSWORD}"
+ EMQX_API="http://localhost:18083/api/v5"
+ ACL_FILE="/bootstrap/bootstrap_acl.json"
+
+ # 辅助函数:打印带时间戳的日志
+ log() {
+ echo "[$(date +'%H:%M:%S')] $1"
+ }
+
+ log "======================================"
+ log "初始化 Dashboard 与 ACL (Debug Version)"
+ log "======================================"
+
+ # ----------------------------------------------------------------
+ # 1. 等待 EMQX API 就绪
+ # ----------------------------------------------------------------
+ log "[1/4] 等待 EMQX API 就绪..."
+ for i in $(seq 1 60); do
+ if curl -s -f -m 5 "${EMQX_API}/status" > /dev/null 2>&1; then
+ log "✓ EMQX API 已就绪"
+ break
+ fi
+ if [ $i -eq 60 ]; then
+ log "✗ EMQX API 启动超时"
+ exit 1
+ fi
+ sleep 5
+ done
+
+ # ----------------------------------------------------------------
+ # 2. 修改 Dashboard 密码
+ # ----------------------------------------------------------------
+ log "[2/4] 检查/更新 Dashboard 密码..."
+
+ # 获取 Token (尝试默认密码)
+ LOGIN_RESP=$(curl -s -X POST "${EMQX_API}/login" \
+ -H 'Content-Type: application/json' \
+ -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"public\"}")
+
+ TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
+
+ if [ -n "$TOKEN" ]; then
+ log " 检测到默认密码,正在更新..."
+ curl -s -f -X POST "${EMQX_API}/users/${DASHBOARD_USER}/change_pwd" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "{\"old_pwd\":\"public\",\"new_pwd\":\"${DASHBOARD_PASS}\"}"
+ log " ✓ Dashboard 密码已更新"
+ else
+ log " ℹ 无法使用默认密码登录,跳过更新(可能已修改)"
+ fi
+
+ # ----------------------------------------------------------------
+ # 3. 导入 ACL 规则
+ # ----------------------------------------------------------------
+ echo "[3/3] 导入ACL规则..."
+
+ # 重新登录获取最新 Token
+ LOGIN_RESP=$(curl -sS -X POST "${EMQX_API}/login" \
+ -H 'Content-Type: application/json' \
+ -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"${DASHBOARD_PASS}\"}")
+
+ TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
+
+ if [ -z "$TOKEN" ]; then
+ echo " ✗ 无法获取Token,请检查密码设置"
+ exit 0
+ fi
+
+ if [ -f "$ACL_FILE" ]; then
+ echo " 正在解析 ACL 文件: $ACL_FILE"
+
+ if ! jq -e . "$ACL_FILE" >/dev/null 2>&1; then
+ echo " ✗ ACL 文件 JSON 格式错误,跳过处理"
+ exit 0
+ fi
+
+ jq -c '.[]' "$ACL_FILE" | while read -r user_config; do
+ USERNAME=$(echo "$user_config" | jq -r '.username // empty')
+
+ # ✅ PUT/POST 都需要 username + rules(username 是 required)
+ REQ_BODY=$(echo "$user_config" | jq -c '{username: .username, rules: .rules}')
+
+ if [ -z "$USERNAME" ]; then
+ echo " ✗ ACL 条目缺少 username,跳过"
+ continue
+ fi
+
+ echo " 配置用户 ${USERNAME} 的ACL规则..."
+
+ # 1) 优先 PUT(覆盖更新)
+ http_code=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
+ -X PUT "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "$REQ_BODY")
+
+ if [ "$http_code" = "204" ]; then
+ echo " ✓ PUT 更新成功"
+ elif [ "$http_code" = "404" ]; then
+ # 2) 不存在则 POST 创建
+ http_code2=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
+ -X POST "${EMQX_API}/authorization/sources/built_in_database/rules/users" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "$REQ_BODY")
+
+ if [ "$http_code2" = "204" ]; then
+ echo " ✓ POST 创建成功"
+ else
+ echo " ✗ POST 失败 (HTTP ${http_code2}):$(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
+ exit 1
+ fi
+ else
+ echo " ✗ PUT 失败 (HTTP ${http_code}):$(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
+ exit 1
+ fi
+
+ # 3) 导入后验证(可选但强烈建议保留)
+ verify_code=$(curl -sS -o /tmp/emqx_acl_verify.json -w '%{http_code}' \
+ -H "Authorization: Bearer ${TOKEN}" \
+ "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}")
+
+ if [ "$verify_code" = "200" ]; then
+ echo " ✓ 验证成功:$(cat /tmp/emqx_acl_verify.json | jq -c '.')"
+ else
+ echo " ✗ 验证失败 (HTTP ${verify_code}):$(cat /tmp/emqx_acl_verify.json 2>/dev/null || true)"
+ exit 1
+ fi
+ done
+
+ echo " ✓ ACL 规则导入完成"
+ else
+ echo " ℹ 未找到 ACL 文件"
+ fi
+
+---
+# ============== StatefulSet ==============
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-emqxs
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+spec:
+ replicas: 1
+ serviceName: helm-emqxs-headless
+ podManagementPolicy: Parallel
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+
+ template:
+ metadata:
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - demo
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: cmii.app
+ operator: In
+ values:
+ - helm-emqxs
+ topologyKey: kubernetes.io/hostname
+
+ imagePullSecrets:
+ - name: harborsecret
+
+ serviceAccountName: helm-emqxs
+
+ securityContext:
+ fsGroup: 1000
+ runAsUser: 1000
+
+ # InitContainer - 准备bootstrap文件
+ initContainers:
+ - name: prepare-bootstrap
+ image: 10.22.57.8:8033/cmii/tools:1.0
+ imagePullPolicy: IfNotPresent
+ # =========================================================
+ # 权限: 必须以 root 身份运行才能 chown
+ # =========================================================
+ securityContext:
+ runAsUser: 0
+ command:
+ - /bin/sh
+ - -c
+ - |
+ echo "准备bootstrap文件..."
+
+ # 创建数据目录
+ mkdir -p /opt/emqx/data
+
+ # 复制bootstrap文件到数据目录
+ # 只在文件不存在时复制,避免覆盖已有数据
+ if [ ! -f /opt/emqx/data/bootstrap_users.json ]; then
+ cp /bootstrap-src/bootstrap_users.json /opt/emqx/data/
+ echo "✓ 已复制用户bootstrap文件"
+ else
+ echo "ℹ 用户bootstrap文件已存在,跳过"
+ fi
+
+ # 设置权限 (现在有root权限,可以成功)
+ chown -R 1000:1000 /opt/emqx/data
+
+ echo "✓ Bootstrap准备完成"
+ volumeMounts:
+ - name: emqx-data
+ mountPath: /opt/emqx/data
+ - name: bootstrap-users
+ mountPath: /bootstrap-src
+
+ containers:
+ # 主容器 - EMQX
+ - name: emqx
+ image: 10.22.57.8:8033/cmii/emqx:5.8.8
+ imagePullPolicy: IfNotPresent
+
+ env:
+ # Pod信息
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: EMQX_DATA_DIR
+ value: "/opt/emqx/data"
+
+ ports:
+ - name: mqtt
+ containerPort: 1883
+ - name: mqttssl
+ containerPort: 8883
+ - name: ws
+ containerPort: 8083
+ - name: dashboard
+ containerPort: 18083
+ - name: ekka
+ containerPort: 4370
+
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "512Mi"
+ limits:
+ cpu: "2000m"
+ memory: "2Gi"
+
+ livenessProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 60
+ periodSeconds: 30
+ timeoutSeconds: 10
+ failureThreshold: 3
+
+ readinessProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 3
+
+ startupProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ failureThreshold: 30
+
+ volumeMounts:
+ - name: emqx-data
+ mountPath: /opt/emqx/data
+ # 使用 subPath 挂载单个配置文件,避免覆盖目录
+ - name: bootstrap-config
+ mountPath: /opt/emqx/etc/emqx.conf
+ subPath: emqx.conf
+
+ # Sidecar - 初始化Dashboard密码和ACL
+ - name: init-dashboard
+ image: 10.22.57.8:8033/cmii/tools:1.0
+ imagePullPolicy: IfNotPresent
+
+ command:
+ - /bin/sh
+ - -c
+ - |
+ # 等待主容器启动
+ echo "等待EMQX启动..."
+ sleep 20
+
+ # 执行初始化
+ /bin/sh /scripts/init-dashboard.sh
+
+ # 保持运行
+ echo "初始化完成,进入守护模式..."
+ while true; do sleep 3600; done
+
+ env:
+ - name: DASHBOARD_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: emqx-credentials
+ key: dashboard-admin-password
+
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "64Mi"
+ limits:
+ cpu: "200m"
+ memory: "128Mi"
+
+ volumeMounts:
+ - name: init-script
+ mountPath: /scripts
+ - name: bootstrap-users
+ mountPath: /bootstrap
+
+ volumes:
+ - name: bootstrap-config
+ configMap:
+ name: emqx-bootstrap-config
+ - name: bootstrap-users
+ configMap:
+ name: emqx-bootstrap-users
+ - name: init-script
+ configMap:
+ name: emqx-init-dashboard
+ defaultMode: 0755
+ - name: emqx-data
+ persistentVolumeClaim:
+ claimName: helm-emqxs
+
+---
+# ============== Service - Headless ==============
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs-headless
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+spec:
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: true
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - name: mqtt
+ port: 1883
+ targetPort: 1883
+ - name: mqttssl
+ port: 8883
+ targetPort: 8883
+ - name: ws
+ port: 8083
+ targetPort: 8083
+ - name: dashboard
+ port: 18083
+ targetPort: 18083
+ - name: ekka
+ port: 4370
+ targetPort: 4370
+
+---
+# ============== Service - NodePort ==============
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+spec:
+ type: NodePort
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - name: mqtt
+ port: 1883
+ targetPort: 1883
+ nodePort: 31883
+ - name: dashboard
+ port: 18083
+ targetPort: 18083
+ nodePort: 38085
+ - name: ws
+ port: 8083
+ targetPort: 8083
+ nodePort: 38083
+ - name: mqttssl
+ port: 8883
+ targetPort: 8883
+ nodePort: 38883
\ No newline at end of file
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-emqx.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-emqx.yaml
new file mode 100644
index 0000000..dd0006d
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-emqx.yaml
@@ -0,0 +1,376 @@
+---
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-emqxs
+ namespace: xa-dcity-uas-260116
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-emqxs-env
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+data:
+ # 集群相关
+ EMQX_CLUSTER__DISCOVERY: "k8s"
+ EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
+ EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
+ EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
+ EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
+ EMQX_CLUSTER__K8S__NAMESPACE: "xa-dcity-uas-260116"
+ EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
+  # Deny by default when no ACL rule matches. NOTE(review): EMQX 5.x maps env vars
+  # via EMQX_<key>__<subkey>; the config key is authorization.no_match, so the
+  EMQX_AUTH__ALLOW_ANONYMOUS: "false"
+  EMQX_AUTHORIZATION__NO_MATCH: "deny"
+  # Dashboard initial admin password (only effective on first boot). NOTE(review): plaintext credential in a ConfigMap — consider moving to a Secret.
+ EMQX_DASHBOARD__DEFAULT_PASSWORD: "odD8#Ve7.B"
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-emqxs-init-script
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+data:
+ init-mqtt-user.sh: |
+ #!/bin/sh
+ set -e
+ DASHBOARD_USER="admin"
+ DASHBOARD_PASS="odD8#Ve7.B"
+ MQTT_USER="admin"
+ MQTT_PASS="odD8#Ve7.B"
+ # 等待 EMQX 本地 API 就绪
+ EMQX_API="http://localhost:18083/api/v5"
+ echo "等待 EMQX API 就绪..."
+ for i in $(seq 1 120); do
+ if curl -s -f -m 5 "${EMQX_API}/status" > /dev/null 2>&1; then
+ echo "EMQX API 已就绪"
+ break
+ fi
+ echo "等待中... ($i/120)"
+ sleep 5
+ done
+ # 修改 Dashboard 管理员密码
+ echo "修改 Dashboard 管理员密码..."
+ /opt/emqx/bin/emqx ctl admins passwd "${DASHBOARD_USER}" "${DASHBOARD_PASS}" || echo "密码可能已设置"
+ echo "Dashboard 密码设置完成"
+ # 获取 Dashboard Token
+ echo "获取 Dashboard Token..."
+ TOKEN=$(curl -s -X POST "${EMQX_API}/login" \
+ -H 'Content-Type: application/json' \
+ -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"${DASHBOARD_PASS}\"}" \
+ | grep -o '"token":"[^"]*' | cut -d'"' -f4)
+ if [ -z "$TOKEN" ]; then
+ echo "ERROR: 无法获取 Token"
+ exit 1
+ fi
+ echo "Token 获取成功"
+ # 创建内置数据库认证器(使用 listeners 作用域)
+ echo "检查并创建内置数据库认证器..."
+ # 为 tcp:default listener 添加认证器
+ echo "为 listener tcp:default 配置认证器..."
+ curl -s -X POST "${EMQX_API}/authentication/tcp:default" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "mechanism": "password_based",
+ "backend": "built_in_database",
+ "user_id_type": "username",
+ "password_hash_algorithm": {
+ "name": "sha256",
+ "salt_position": "suffix"
+ }
+ }' 2>/dev/null || echo "tcp:default 认证器可能已存在"
+ # 为 ws:default listener 添加认证器
+ echo "为 listener ws:default 配置认证器..."
+ curl -s -X POST "${EMQX_API}/authentication/ws:default" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "mechanism": "password_based",
+ "backend": "built_in_database",
+ "user_id_type": "username",
+ "password_hash_algorithm": {
+ "name": "sha256",
+ "salt_position": "suffix"
+ }
+ }' 2>/dev/null || echo "ws:default 认证器可能已存在"
+ # 等待认证器创建完成
+ sleep 2
+ # 创建 MQTT 用户
+ echo "创建 MQTT 用户: ${MQTT_USER}..."
+ curl -s -X POST "${EMQX_API}/authentication/password_based:built_in_database/users?listener_id=tcp:default" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "{\"user_id\":\"${MQTT_USER}\",\"password\":\"${MQTT_PASS}\",\"is_superuser\":true}" \
+ 2>/dev/null || echo "用户可能已存在,尝试更新..."
+ # 尝试更新密码
+ curl -s -X PUT "${EMQX_API}/authentication/password_based:built_in_database/users/${MQTT_USER}?listener_id=tcp:default" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "{\"password\":\"${MQTT_PASS}\",\"is_superuser\":true}" \
+ 2>/dev/null || true
+ echo "MQTT 用户创建/更新完成"
+ # 创建授权规则
+ echo "配置授权规则..."
+ # 创建内置数据库授权源
+ curl -s -X POST "${EMQX_API}/authorization/sources" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "type": "built_in_database",
+ "enable": true
+ }' 2>/dev/null || echo "授权源可能已存在"
+ sleep 2
+ # 为 admin 用户添加授权规则(使用数组格式)
+ echo "为 ${MQTT_USER} 用户添加 ACL 规则..."
+ curl -s -X POST "${EMQX_API}/authorization/sources/built_in_database/rules/users" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "[{\"username\":\"${MQTT_USER}\",\"rules\":[{\"action\":\"all\",\"permission\":\"allow\",\"topic\":\"#\"}]}]" \
+ 2>/dev/null && echo "ACL 规则创建成功" || echo "规则可能已存在,尝试更新..."
+ # 尝试更新规则(PUT 请求需要单个对象,不是数组)
+ curl -s -X PUT "${EMQX_API}/authorization/sources/built_in_database/rules/users/${MQTT_USER}" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "{\"rules\":[{\"action\":\"all\",\"permission\":\"allow\",\"topic\":\"#\"}]}" \
+ 2>/dev/null && echo "ACL 规则更新成功" || true
+ echo "ACL 规则配置完成"
+ echo "初始化完成!MQTT 用户: ${MQTT_USER}"
+ echo "可通过以下方式连接:"
+ echo " - MQTT: localhost:1883"
+ echo " - WebSocket: localhost:8083"
+ echo " - Dashboard: http://localhost:18083"
+ echo " - 用户名: ${MQTT_USER}"
+
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-emqxs
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+spec:
+ replicas: 1
+ serviceName: helm-emqxs-headless
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ template:
+ metadata:
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - demo
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-emqxs
+ containers:
+ - name: helm-emqxs
+ image: 10.22.57.8:8033/cmii/emqx:5.8.8
+ imagePullPolicy: Always
+ ports:
+ - name: mqtt
+ containerPort: 1883
+ - name: mqttssl
+ containerPort: 8883
+ - name: mgmt
+ containerPort: 8081
+ - name: ws
+ containerPort: 8083
+ - name: wss
+ containerPort: 8084
+ - name: dashboard
+ containerPort: 18083
+ - name: ekka
+ containerPort: 4370
+ envFrom:
+ - configMapRef:
+ name: helm-emqxs-env
+ # 添加生命周期钩子
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - |
+ # 后台执行初始化脚本,避免阻塞容器启动
+ nohup /bin/sh /scripts/init-mqtt-user.sh > /tmp/init.log 2>&1 &
+          # Health probes on the Dashboard API; the postStart init script above (not an initContainer) waits on this same /status endpoint.
+ livenessProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 60
+ periodSeconds: 30
+ readinessProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ resources: {}
+ volumeMounts:
+ # 5.x 默认 data 目录,包含所有持久化数据
+ - name: emqx-data
+ mountPath: "/opt/emqx/data"
+ readOnly: false
+ - name: init-script
+ mountPath: /scripts
+ volumes:
+ - name: emqx-data
+ persistentVolumeClaim:
+ claimName: helm-emqxs
+ - name: init-script
+ configMap:
+ name: helm-emqxs-init-script
+ defaultMode: 0755
+
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-emqxs
+ namespace: xa-dcity-uas-260116
+rules:
+- apiGroups: [""]
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - watch
+ - list
+
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-emqxs
+ namespace: xa-dcity-uas-260116
+subjects:
+- kind: ServiceAccount
+ name: helm-emqxs
+ namespace: xa-dcity-uas-260116
+roleRef:
+ kind: Role
+ name: helm-emqxs
+ apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+spec:
+ type: NodePort
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - port: 1883
+ name: mqtt
+ targetPort: 1883
+ nodePort: 31883
+ - port: 18083
+ name: dashboard
+ targetPort: 18083
+ nodePort: 38085
+ - port: 8083
+ name: mqtt-websocket
+ targetPort: 8083
+ nodePort: 38083
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs-headless
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+spec:
+ type: ClusterIP
+ clusterIP: None
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - name: mqtt
+ port: 1883
+ protocol: TCP
+ targetPort: 1883
+ - name: mqttssl
+ port: 8883
+ protocol: TCP
+ targetPort: 8883
+ - name: mgmt
+ port: 8081
+ protocol: TCP
+ targetPort: 8081
+ - name: websocket
+ port: 8083
+ protocol: TCP
+ targetPort: 8083
+ - name: wss
+ port: 8084
+ protocol: TCP
+ targetPort: 8084
+ - name: dashboard
+ port: 18083
+ protocol: TCP
+ targetPort: 18083
+ - name: ekka
+ port: 4370
+ protocol: TCP
+ targetPort: 4370
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-frontend.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-frontend.yaml
new file mode 100644
index 0000000..f17b3fd
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-frontend.yaml
@@ -0,0 +1,203 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: nginx-cm
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: frontend
+data:
+ nginx.conf: |
+ server {
+ listen 9528;
+ server_name localhost;
+ gzip on;
+
+ location / {
+ root /home/cmii-platform/dist;
+ index index.html index.htm;
+ }
+
+ error_page 500 502 503 504 /50x.html;
+ location = /50x.html {
+ root html;
+ }
+ }
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-platform-uas
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uas
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ replicas: 0
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uas
+ template:
+ metadata:
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uas
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-platform-uas
+ image: 10.22.57.8:8033/cmii/cmii-uav-platform-uas:2.2.0-pro-20251223
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xa-dcity-uas-260116
+ - name: APPLICATION_NAME
+ value: cmii-uav-platform-uas
+ ports:
+ - name: platform-9528
+ containerPort: 9528
+ protocol: TCP
+ resources:
+ limits:
+ cpu: "1"
+ memory: 1Gi
+ requests:
+ cpu: 50m
+ memory: 50Mi
+ volumeMounts:
+ - name: nginx-conf
+ mountPath: /etc/nginx/conf.d/nginx.conf
+ subPath: nginx.conf
+ - name: tenant-prefix
+ subPath: ingress-config.js
+ mountPath: /home/cmii-platform/dist/ingress-config.js
+ volumes:
+ - name: nginx-conf
+ configMap:
+ name: nginx-cm
+ items:
+ - key: nginx.conf
+ path: nginx.conf
+ - name: tenant-prefix
+ configMap:
+ name: tenant-prefix-uas
+ items:
+ - key: ingress-config.js
+ path: ingress-config.js
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-platform-uas
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uas
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/version: uas-2.2
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uas
+ ports:
+ - name: web-svc-port
+ port: 9528
+ protocol: TCP
+ targetPort: 9528
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-platform-uasms
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uasms
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/app-version: uas-2.2
+spec:
+ replicas: 0
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uasms
+ template:
+ metadata:
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uasms
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-platform-uasms
+ image: 10.22.57.8:8033/cmii/cmii-uav-platform-uasms:2.2.0-pro-20251223
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xa-dcity-uas-260116
+ - name: APPLICATION_NAME
+ value: cmii-uav-platform-uasms
+ ports:
+ - name: platform-9528
+ containerPort: 9528
+ protocol: TCP
+ resources:
+ limits:
+ cpu: "1"
+ memory: 1Gi
+ requests:
+ cpu: 50m
+ memory: 50Mi
+ volumeMounts:
+ - name: nginx-conf
+ mountPath: /etc/nginx/conf.d/nginx.conf
+ subPath: nginx.conf
+ - name: tenant-prefix
+ subPath: ingress-config.js
+ mountPath: /home/cmii-platform/dist/ingress-config.js
+ volumes:
+ - name: nginx-conf
+ configMap:
+ name: nginx-cm
+ items:
+ - key: nginx.conf
+ path: nginx.conf
+ - name: tenant-prefix
+ configMap:
+ name: tenant-prefix-uasms
+ items:
+ - key: ingress-config.js
+ path: ingress-config.js
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-platform-uasms
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uasms
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/version: uas-2.2
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uasms
+ ports:
+ - name: web-svc-port
+ port: 9528
+ protocol: TCP
+ targetPort: 9528
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-ingress-13014.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-ingress-13014.yaml
new file mode 100644
index 0000000..107fa28
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-ingress-13014.yaml
@@ -0,0 +1,995 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: frontend-applications-ingress
+ namespace: xa-dcity-uas-260116
+ labels:
+ type: frontend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/enable-cors: 'true'
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+spec:
+ rules:
+ - host: fake-domain.xa-dcity-uas-260116.io
+ http:
+ paths:
+ - path: /?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform
+ port:
+ number: 9528
+ - path: /supervision/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-suav-platform-supervision
+ port:
+ number: 9528
+ - path: /supervisionh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-suav-platform-supervisionh5
+ port:
+ number: 9528
+ - path: /pangu/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform
+ port:
+ number: 9528
+ - path: /ai-brain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-ai-brain
+ port:
+ number: 9528
+ - path: /armypeople/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-armypeople
+ port:
+ number: 9528
+ - path: /awareness/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-awareness
+ port:
+ number: 9528
+ - path: /base/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-base
+ port:
+ number: 9528
+ - path: /blockchain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-blockchain
+ port:
+ number: 9528
+ - path: /classification/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-classification
+ port:
+ number: 9528
+ - path: /cmsportal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-cms-portal
+ port:
+ number: 9528
+ - path: /detection/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-detection
+ port:
+ number: 9528
+ - path: /dikongzhixingh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-dikongzhixingh5
+ port:
+ number: 9528
+ - path: /dispatchh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-dispatchh5
+ port:
+ number: 9528
+ - path: /emergency/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-emergency-rescue
+ port:
+ number: 9528
+ - path: /eventsh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-eventsh5
+ port:
+ number: 9528
+ - path: /flight-control/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-flight-control
+ port:
+ number: 9528
+ - path: /hljtt/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-hljtt
+ port:
+ number: 9528
+ - path: /hyper/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-hyperspectral
+ port:
+ number: 9528
+ - path: /iot/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-iot-manager
+ port:
+ number: 9528
+ - path: /jiangsuwenlv/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-jiangsuwenlv
+ port:
+ number: 9528
+ - path: /logistics/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-logistics
+ port:
+ number: 9528
+ - path: /media/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-media
+ port:
+ number: 9528
+ - path: /mianyangbackend/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-mianyangbackend
+ port:
+ number: 9528
+ - path: /multiterminal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-multiterminal
+ port:
+ number: 9528
+ - path: /mws/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-mws
+ port:
+ number: 9528
+ - path: /oms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-oms
+ port:
+ number: 9528
+ - path: /open/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-open
+ port:
+ number: 9528
+ - path: /pilot2cloud/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-pilot2-to-cloud
+ port:
+ number: 9528
+ - path: /qingdao/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-qingdao
+ port:
+ number: 9528
+ - path: /qinghaitourism/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-qinghaitourism
+ port:
+ number: 9528
+ - path: /renyike/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-renyike
+ port:
+ number: 9528
+ - path: /scanner/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-scanner
+ port:
+ number: 9528
+ - path: /security/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-security
+ port:
+ number: 9528
+ - path: /securityh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-securityh5
+ port:
+ number: 9528
+ - path: /seniclive/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-seniclive
+ port:
+ number: 9528
+ - path: /share/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-share
+ port:
+ number: 9528
+ - path: /smauth/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-smauth
+ port:
+ number: 9528
+ - path: /smsecret/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-smsecret
+ port:
+ number: 9528
+ - path: /splice/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-splice
+ port:
+ number: 9528
+ - path: /threedsimulation/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-threedsimulation
+ port:
+ number: 9528
+ - path: /traffic/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-traffic
+ port:
+ number: 9528
+ - path: /uas/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-uas
+ port:
+ number: 9528
+          - path: /uaskny/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-uaskny
+ port:
+ number: 9528
+ - path: /uasms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-uasms
+ port:
+ number: 9528
+          - path: /uasmskny/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-uasmskny
+ port:
+ number: 9528
+ - path: /visualization/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-visualization
+ port:
+ number: 9528
+ - path: /uavmsmanager/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uavms-platform-manager
+ port:
+ number: 9528
+ - path: /secenter/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uavms-platform-security-center
+ port:
+ number: 9528
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: backend-applications-ingress
+ namespace: xa-dcity-uas-260116
+ labels:
+ type: backend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/enable-cors: 'true'
+spec:
+ rules:
+ - host: cmii-admin-data.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-admin-data
+ port:
+ number: 8080
+ - host: cmii-admin-gateway.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-admin-gateway
+ port:
+ number: 8080
+ - host: cmii-admin-user.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-admin-user
+ port:
+ number: 8080
+ - host: cmii-app-release.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-app-release
+ port:
+ number: 8080
+ - host: cmii-open-gateway.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-open-gateway
+ port:
+ number: 8080
+ - host: cmii-sky-converge.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-sky-converge
+ port:
+ number: 8080
+ - host: cmii-suav-supervision.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-suav-supervision
+ port:
+ number: 8080
+ - host: cmii-uas-datahub.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uas-datahub
+ port:
+ number: 8080
+ - host: cmii-uas-gateway.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uas-gateway
+ port:
+ number: 8080
+ - host: cmii-uas-lifecycle.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uas-lifecycle
+ port:
+ number: 8080
+ - host: cmii-uav-advanced5g.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-advanced5g
+ port:
+ number: 8080
+ - host: cmii-uav-airspace.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-airspace
+ port:
+ number: 8080
+ - host: cmii-uav-alarm.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-alarm
+ port:
+ number: 8080
+ - host: cmii-uav-autowaypoint.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-autowaypoint
+ port:
+ number: 8080
+ - host: cmii-uav-brain.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-brain
+ port:
+ number: 8080
+ - host: cmii-uav-bridge.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-bridge
+ port:
+ number: 8080
+ - host: cmii-uav-cloud-live.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-cloud-live
+ port:
+ number: 8080
+ - host: cmii-uav-clusters.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-clusters
+ port:
+ number: 8080
+ - host: cmii-uav-cms.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-cms
+ port:
+ number: 8080
+ - host: cmii-uav-data-post-process.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-data-post-process
+ port:
+ number: 8080
+ - host: cmii-uav-depotautoreturn.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-depotautoreturn
+ port:
+ number: 8080
+ - host: cmii-uav-developer.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-developer
+ port:
+ number: 8080
+ - host: cmii-uav-device.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-device
+ port:
+ number: 8080
+ - host: cmii-uav-emergency.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-emergency
+ port:
+ number: 8080
+ - host: cmii-uav-fwdd.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-fwdd
+ port:
+ number: 8080
+ - host: cmii-uav-gateway.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-gateway
+ port:
+ number: 8080
+ - host: cmii-uav-gis-server.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-gis-server
+ port:
+ number: 8080
+ - host: cmii-uav-grid-datasource.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-grid-datasource
+ port:
+ number: 8080
+ - host: cmii-uav-grid-engine.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-grid-engine
+ port:
+ number: 8080
+ - host: cmii-uav-grid-manage.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-grid-manage
+ port:
+ number: 8080
+ - host: cmii-uav-industrial-portfolio.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-industrial-portfolio
+ port:
+ number: 8080
+ - host: cmii-uav-integration.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-integration
+ port:
+ number: 8080
+ - host: cmii-uav-iot-dispatcher.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-iot-dispatcher
+ port:
+ number: 8080
+ - host: cmii-uav-iot-manager.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-iot-manager
+ port:
+ number: 8080
+ - host: cmii-uav-kpi-monitor.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-kpi-monitor
+ port:
+ number: 8080
+ - host: cmii-uav-logger.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-logger
+ port:
+ number: 8080
+ - host: cmii-uav-material-warehouse.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-material-warehouse
+ port:
+ number: 8080
+ - host: cmii-uav-mission.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-mission
+ port:
+ number: 8080
+ - host: cmii-uav-mqtthandler.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-mqtthandler
+ port:
+ number: 8080
+ - host: cmii-uav-multilink.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-multilink
+ port:
+ number: 8080
+ - host: cmii-uav-notice.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-notice
+ port:
+ number: 8080
+ - host: cmii-uav-oauth.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-oauth
+ port:
+ number: 8080
+ - host: cmii-uav-process.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-process
+ port:
+ number: 8080
+ - host: cmii-uav-sec-awareness.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-sec-awareness
+ port:
+ number: 8080
+ - host: cmii-uav-security-trace.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-security-trace
+ port:
+ number: 8080
+ - host: cmii-uav-sense-adapter.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-sense-adapter
+ port:
+ number: 8080
+ - host: cmii-uav-surveillance.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-surveillance
+ port:
+ number: 8080
+ - host: cmii-uav-sync.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-sync
+ port:
+ number: 8080
+ - host: cmii-uav-tcp-server.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-tcp-server
+ port:
+ number: 8080
+ - host: cmii-uav-threedsimulation.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-threedsimulation
+ port:
+ number: 8080
+ - host: cmii-uav-tower.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-tower
+ port:
+ number: 8080
+ - host: cmii-uav-user.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-user
+ port:
+ number: 8080
+ - host: cmii-uav-watchdog.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-watchdog
+ port:
+ number: 8080
+ - host: cmii-uav-waypoint.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-waypoint
+ port:
+ number: 8080
+ - host: cmii-uavms-pyfusion.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uavms-pyfusion
+ port:
+ number: 8080
+ - host: cmii-uavms-security-center.uavcloud-xadcity-uas.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uavms-security-center
+ port:
+ number: 8080
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: all-gateways-ingress
+ namespace: xa-dcity-uas-260116
+ labels:
+ type: api-gateway
+ octopus.control: all-ingress-config-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/enable-cors: 'true'
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/proxy-read-timeout: '3600'
+ nginx.ingress.kubernetes.io/proxy-send-timeout: '3600'
+spec:
+ rules:
+ - host: fake-domain.xa-dcity-uas-260116.io
+ http:
+ paths:
+ - path: /oms/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-admin-gateway
+ port:
+ number: 8080
+ - path: /open/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-open-gateway
+ port:
+ number: 8080
+ - path: /api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-gateway
+ port:
+ number: 8080
+ - path: /uas/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uas-gateway
+ port:
+ number: 8080
+ - path: /converge/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-sky-converge
+ port:
+ number: 8080
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-ingress.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-ingress.yaml
new file mode 100644
index 0000000..2d58030
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-ingress.yaml
@@ -0,0 +1,826 @@
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: frontend-applications-ingress
+ namespace: xa-dcity-uas-260116
+ labels:
+ type: frontend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ rewrite ^(/supervision)$ $1/ redirect;
+ rewrite ^(/supervisionh5)$ $1/ redirect;
+ rewrite ^(/pangu)$ $1/ redirect;
+ rewrite ^(/ai-brain)$ $1/ redirect;
+ rewrite ^(/armypeople)$ $1/ redirect;
+ rewrite ^(/awareness)$ $1/ redirect;
+ rewrite ^(/base)$ $1/ redirect;
+ rewrite ^(/blockchain)$ $1/ redirect;
+ rewrite ^(/classification)$ $1/ redirect;
+ rewrite ^(/cmsportal)$ $1/ redirect;
+ rewrite ^(/detection)$ $1/ redirect;
+ rewrite ^(/dikongzhixingh5)$ $1/ redirect;
+ rewrite ^(/dispatchh5)$ $1/ redirect;
+ rewrite ^(/emergency)$ $1/ redirect;
+ rewrite ^(/eventsh5)$ $1/ redirect;
+ rewrite ^(/flight-control)$ $1/ redirect;
+ rewrite ^(/hljtt)$ $1/ redirect;
+ rewrite ^(/hyper)$ $1/ redirect;
+ rewrite ^(/iot)$ $1/ redirect;
+ rewrite ^(/jiangsuwenlv)$ $1/ redirect;
+ rewrite ^(/logistics)$ $1/ redirect;
+ rewrite ^(/media)$ $1/ redirect;
+ rewrite ^(/mianyangbackend)$ $1/ redirect;
+ rewrite ^(/multiterminal)$ $1/ redirect;
+ rewrite ^(/mws)$ $1/ redirect;
+ rewrite ^(/oms)$ $1/ redirect;
+ rewrite ^(/open)$ $1/ redirect;
+ rewrite ^(/pilot2cloud)$ $1/ redirect;
+ rewrite ^(/qingdao)$ $1/ redirect;
+ rewrite ^(/qinghaitourism)$ $1/ redirect;
+ rewrite ^(/renyike)$ $1/ redirect;
+ rewrite ^(/scanner)$ $1/ redirect;
+ rewrite ^(/security)$ $1/ redirect;
+ rewrite ^(/securityh5)$ $1/ redirect;
+ rewrite ^(/seniclive)$ $1/ redirect;
+ rewrite ^(/share)$ $1/ redirect;
+ rewrite ^(/smauth)$ $1/ redirect;
+ rewrite ^(/smsecret)$ $1/ redirect;
+ rewrite ^(/splice)$ $1/ redirect;
+ rewrite ^(/threedsimulation)$ $1/ redirect;
+ rewrite ^(/traffic)$ $1/ redirect;
+      rewrite ^(/uas)$ $1/ redirect;
+      rewrite ^(/uaskny)$ $1/ redirect;
+      rewrite ^(/uasms)$ $1/ redirect;
+      rewrite ^(/uasmskny)$ $1/ redirect;
+ rewrite ^(/visualization)$ $1/ redirect;
+ rewrite ^(/uavmsmanager)$ $1/ redirect;
+ rewrite ^(/secenter)$ $1/ redirect;
+spec:
+ rules:
+ - host: fake-domain.xa-dcity-uas-260116.io
+ http:
+ paths:
+ - path: /260116/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform
+ servicePort: 9528
+ - path: /260116/supervision/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-platform-supervision
+ servicePort: 9528
+ - path: /260116/supervisionh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-platform-supervisionh5
+ servicePort: 9528
+ - path: /260116/pangu/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform
+ servicePort: 9528
+ - path: /260116/ai-brain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-ai-brain
+ servicePort: 9528
+ - path: /260116/armypeople/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-armypeople
+ servicePort: 9528
+ - path: /260116/awareness/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-awareness
+ servicePort: 9528
+ - path: /260116/base/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-base
+ servicePort: 9528
+ - path: /260116/blockchain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-blockchain
+ servicePort: 9528
+ - path: /260116/classification/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-classification
+ servicePort: 9528
+ - path: /260116/cmsportal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-cms-portal
+ servicePort: 9528
+ - path: /260116/detection/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-detection
+ servicePort: 9528
+ - path: /260116/dikongzhixingh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-dikongzhixingh5
+ servicePort: 9528
+ - path: /260116/dispatchh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-dispatchh5
+ servicePort: 9528
+ - path: /260116/emergency/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-emergency-rescue
+ servicePort: 9528
+ - path: /260116/eventsh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-eventsh5
+ servicePort: 9528
+ - path: /260116/flight-control/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-flight-control
+ servicePort: 9528
+ - path: /260116/hljtt/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-hljtt
+ servicePort: 9528
+ - path: /260116/hyper/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-hyperspectral
+ servicePort: 9528
+ - path: /260116/iot/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-iot-manager
+ servicePort: 9528
+ - path: /260116/jiangsuwenlv/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-jiangsuwenlv
+ servicePort: 9528
+ - path: /260116/logistics/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-logistics
+ servicePort: 9528
+ - path: /260116/media/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-media
+ servicePort: 9528
+ - path: /260116/mianyangbackend/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-mianyangbackend
+ servicePort: 9528
+ - path: /260116/multiterminal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-multiterminal
+ servicePort: 9528
+ - path: /260116/mws/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-mws
+ servicePort: 9528
+ - path: /260116/oms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-oms
+ servicePort: 9528
+ - path: /260116/open/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-open
+ servicePort: 9528
+ - path: /260116/pilot2cloud/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-pilot2-to-cloud
+ servicePort: 9528
+ - path: /260116/qingdao/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-qingdao
+ servicePort: 9528
+ - path: /260116/qinghaitourism/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-qinghaitourism
+ servicePort: 9528
+ - path: /260116/renyike/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-renyike
+ servicePort: 9528
+ - path: /260116/scanner/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-scanner
+ servicePort: 9528
+ - path: /260116/security/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-security
+ servicePort: 9528
+ - path: /260116/securityh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-securityh5
+ servicePort: 9528
+ - path: /260116/seniclive/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-seniclive
+ servicePort: 9528
+ - path: /260116/share/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-share
+ servicePort: 9528
+ - path: /260116/smauth/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-smauth
+ servicePort: 9528
+ - path: /260116/smsecret/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-smsecret
+ servicePort: 9528
+ - path: /260116/splice/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-splice
+ servicePort: 9528
+ - path: /260116/threedsimulation/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-threedsimulation
+ servicePort: 9528
+ - path: /260116/traffic/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-traffic
+ servicePort: 9528
+ - path: /260116/uas/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uas
+ servicePort: 9528
+        - path: /260116/uaskny/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uaskny
+ servicePort: 9528
+ - path: /260116/uasms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uasms
+ servicePort: 9528
+        - path: /260116/uasmskny/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uasmskny
+ servicePort: 9528
+ - path: /260116/visualization/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-visualization
+ servicePort: 9528
+ - path: /260116/uavmsmanager/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-platform-manager
+ servicePort: 9528
+ - path: /260116/secenter/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-platform-security-center
+ servicePort: 9528
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: backend-applications-ingress
+ namespace: xa-dcity-uas-260116
+ labels:
+ type: backend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+spec:
+ rules:
+ - host: cmii-admin-data.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-data
+ servicePort: 8080
+ - host: cmii-admin-gateway.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-gateway
+ servicePort: 8080
+ - host: cmii-admin-user.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-user
+ servicePort: 8080
+ - host: cmii-app-release.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-app-release
+ servicePort: 8080
+ - host: cmii-open-gateway.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-open-gateway
+ servicePort: 8080
+ - host: cmii-sky-converge.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-sky-converge
+ servicePort: 8080
+ - host: cmii-suav-supervision.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-supervision
+ servicePort: 8080
+ - host: cmii-uas-datahub.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-datahub
+ servicePort: 8080
+ - host: cmii-uas-gateway.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-gateway
+ servicePort: 8080
+ - host: cmii-uas-lifecycle.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-lifecycle
+ servicePort: 8080
+ - host: cmii-uav-advanced5g.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-advanced5g
+ servicePort: 8080
+ - host: cmii-uav-airspace.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-airspace
+ servicePort: 8080
+ - host: cmii-uav-alarm.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-alarm
+ servicePort: 8080
+ - host: cmii-uav-autowaypoint.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-autowaypoint
+ servicePort: 8080
+ - host: cmii-uav-brain.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-brain
+ servicePort: 8080
+ - host: cmii-uav-bridge.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-bridge
+ servicePort: 8080
+ - host: cmii-uav-cloud-live.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-cloud-live
+ servicePort: 8080
+ - host: cmii-uav-clusters.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-clusters
+ servicePort: 8080
+ - host: cmii-uav-cms.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-cms
+ servicePort: 8080
+ - host: cmii-uav-data-post-process.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-data-post-process
+ servicePort: 8080
+ - host: cmii-uav-depotautoreturn.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-depotautoreturn
+ servicePort: 8080
+ - host: cmii-uav-developer.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-developer
+ servicePort: 8080
+ - host: cmii-uav-device.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-device
+ servicePort: 8080
+ - host: cmii-uav-emergency.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-emergency
+ servicePort: 8080
+ - host: cmii-uav-fwdd.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-fwdd
+ servicePort: 8080
+ - host: cmii-uav-gateway.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gateway
+ servicePort: 8080
+ - host: cmii-uav-gis-server.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gis-server
+ servicePort: 8080
+ - host: cmii-uav-grid-datasource.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-datasource
+ servicePort: 8080
+ - host: cmii-uav-grid-engine.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-engine
+ servicePort: 8080
+ - host: cmii-uav-grid-manage.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-manage
+ servicePort: 8080
+ - host: cmii-uav-industrial-portfolio.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-industrial-portfolio
+ servicePort: 8080
+ - host: cmii-uav-integration.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-integration
+ servicePort: 8080
+ - host: cmii-uav-iot-dispatcher.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-iot-dispatcher
+ servicePort: 8080
+ - host: cmii-uav-iot-manager.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-iot-manager
+ servicePort: 8080
+ - host: cmii-uav-kpi-monitor.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-kpi-monitor
+ servicePort: 8080
+ - host: cmii-uav-logger.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-logger
+ servicePort: 8080
+ - host: cmii-uav-material-warehouse.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-material-warehouse
+ servicePort: 8080
+ - host: cmii-uav-mission.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-mission
+ servicePort: 8080
+ - host: cmii-uav-mqtthandler.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-mqtthandler
+ servicePort: 8080
+ - host: cmii-uav-multilink.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-multilink
+ servicePort: 8080
+ - host: cmii-uav-notice.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-notice
+ servicePort: 8080
+ - host: cmii-uav-oauth.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-oauth
+ servicePort: 8080
+ - host: cmii-uav-process.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-process
+ servicePort: 8080
+ - host: cmii-uav-sec-awareness.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sec-awareness
+ servicePort: 8080
+ - host: cmii-uav-security-trace.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-security-trace
+ servicePort: 8080
+ - host: cmii-uav-sense-adapter.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sense-adapter
+ servicePort: 8080
+ - host: cmii-uav-surveillance.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-surveillance
+ servicePort: 8080
+ - host: cmii-uav-sync.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sync
+ servicePort: 8080
+ - host: cmii-uav-tcp-server.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-tcp-server
+ servicePort: 8080
+ - host: cmii-uav-threedsimulation.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-threedsimulation
+ servicePort: 8080
+ - host: cmii-uav-tower.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-tower
+ servicePort: 8080
+ - host: cmii-uav-user.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-user
+ servicePort: 8080
+ - host: cmii-uav-watchdog.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-watchdog
+ servicePort: 8080
+ - host: cmii-uav-waypoint.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-waypoint
+ servicePort: 8080
+ - host: cmii-uavms-pyfusion.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-pyfusion
+ servicePort: 8080
+ - host: cmii-uavms-security-center.uavcloud-260116.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-security-center
+ servicePort: 8080
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: all-gateways-ingress
+ namespace: xa-dcity-uas-260116
+ labels:
+ type: api-gateway
+ octopus.control: all-ingress-config-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+    proxy_set_header Upgrade $http_upgrade;
+    proxy_set_header Connection "upgrade";
+spec:
+ rules:
+ - host: fake-domain.xa-dcity-uas-260116.io
+ http:
+ paths:
+ - path: /260116/oms/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-gateway
+ servicePort: 8080
+ - path: /260116/open/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-open-gateway
+ servicePort: 8080
+ - path: /260116/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gateway
+ servicePort: 8080
+ - path: /260116/uas/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-gateway
+ servicePort: 8080
+ - path: /260116/converge/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-sky-converge
+ servicePort: 8080
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-mongo.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-mongo.yaml
new file mode 100644
index 0000000..e06188f
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-mongo.yaml
@@ -0,0 +1,78 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mongo
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+spec:
+ type: NodePort
+ selector:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ ports:
+ - port: 27017
+ name: server-27017
+ targetPort: 27017
+ nodePort: 37017
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-mongo
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+spec:
+ serviceName: helm-mongo
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ template:
+ metadata:
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ containers:
+ - name: helm-mongo
+ image: 10.22.57.8:8033/cmii/mongo:5.0
+ resources: {}
+ ports:
+ - containerPort: 27017
+ name: mongo27017
+ protocol: TCP
+ env:
+ - name: MONGO_INITDB_ROOT_USERNAME
+ value: cmlc
+ - name: MONGO_INITDB_ROOT_PASSWORD
+ value: REdPza8#oVlt
+ volumeMounts:
+ - name: mongo-data
+ mountPath: /data/db
+ readOnly: false
+ subPath: default/helm-mongo/data/db
+ volumes:
+ - name: mongo-data
+ persistentVolumeClaim:
+ claimName: helm-mongo
+---
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-mysql.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-mysql.yaml
new file mode 100644
index 0000000..66074de
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-mysql.yaml
@@ -0,0 +1,410 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-mysql
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+ annotations: {}
+secrets:
+ - name: helm-mysql
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-mysql
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+type: Opaque
+data:
+ mysql-root-password: "UXpmWFFoZDNiUQ=="
+ mysql-password: "S0F0cm5PckFKNw=="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-mysql
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: primary
+data:
+ my.cnf: |-
+
+ [mysqld]
+ port=3306
+ basedir=/opt/bitnami/mysql
+ datadir=/bitnami/mysql/data
+ pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ log-error=/bitnami/mysql/data/error.log
+ general_log_file = /bitnami/mysql/data/general.log
+ slow_query_log_file = /bitnami/mysql/data/slow.log
+ innodb_data_file_path = ibdata1:512M:autoextend
+ innodb_buffer_pool_size = 512M
+ innodb_buffer_pool_instances = 2
+ innodb_log_file_size = 512M
+ innodb_log_files_in_group = 4
+    # innodb_log_files_in_group = 4  -- duplicate of the identical line above; commented out
+ log-bin = /bitnami/mysql/data/mysql-bin
+ max_binlog_size=1G
+ transaction_isolation = REPEATABLE-READ
+ default_storage_engine = innodb
+ character-set-server = utf8mb4
+ collation-server=utf8mb4_bin
+ binlog_format = ROW
+ binlog_rows_query_log_events=on
+ binlog_cache_size=4M
+ binlog_expire_logs_seconds = 1296000
+ max_binlog_cache_size=2G
+ gtid_mode = on
+ enforce_gtid_consistency = 1
+ sync_binlog = 1
+ innodb_flush_log_at_trx_commit = 1
+ innodb_flush_method = O_DIRECT
+ log_slave_updates=1
+ relay_log_recovery = 1
+ relay-log-purge = 1
+ default_time_zone = '+08:00'
+ lower_case_table_names=1
+ log_bin_trust_function_creators=1
+ group_concat_max_len=67108864
+ innodb_io_capacity = 4000
+ innodb_io_capacity_max = 8000
+ innodb_flush_sync = 0
+ innodb_flush_neighbors = 0
+ innodb_write_io_threads = 8
+ innodb_read_io_threads = 8
+ innodb_purge_threads = 4
+ innodb_page_cleaners = 4
+ innodb_open_files = 65535
+ innodb_max_dirty_pages_pct = 50
+ innodb_lru_scan_depth = 4000
+ innodb_checksum_algorithm = crc32
+ innodb_lock_wait_timeout = 10
+ innodb_rollback_on_timeout = 1
+ innodb_print_all_deadlocks = 1
+ innodb_file_per_table = 1
+ innodb_online_alter_log_max_size = 4G
+ innodb_stats_on_metadata = 0
+ innodb_thread_concurrency = 0
+ innodb_sync_spin_loops = 100
+ innodb_spin_wait_delay = 30
+ lock_wait_timeout = 3600
+ slow_query_log = 1
+ long_query_time = 10
+ log_queries_not_using_indexes =1
+ log_throttle_queries_not_using_indexes = 60
+ min_examined_row_limit = 100
+ log_slow_admin_statements = 1
+ log_slow_slave_statements = 1
+ default_authentication_plugin=mysql_native_password
+ skip-name-resolve=1
+ explicit_defaults_for_timestamp=1
+ plugin_dir=/opt/bitnami/mysql/plugin
+ max_allowed_packet=128M
+ max_connections = 2000
+ max_connect_errors = 1000000
+ table_definition_cache=2000
+ table_open_cache_instances=64
+ tablespace_definition_cache=1024
+ thread_cache_size=256
+ interactive_timeout = 600
+ wait_timeout = 600
+ tmpdir=/opt/bitnami/mysql/tmp
+    # max_allowed_packet=32M  -- duplicate key: last-wins, this silently overrode the 128M value set earlier; commented out
+ bind-address=0.0.0.0
+ performance_schema = 1
+ performance_schema_instrument = '%memory%=on'
+ performance_schema_instrument = '%lock%=on'
+ innodb_monitor_enable=ALL
+
+ [mysql]
+ no-auto-rehash
+
+ [mysqldump]
+ quick
+ max_allowed_packet = 32M
+
+ [client]
+ port=3306
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+    default-character-set=UTF8  # NOTE(review): server uses utf8mb4/utf8mb4_bin -- consider utf8mb4 here for consistency
+ plugin_dir=/opt/bitnami/mysql/plugin
+
+ [manager]
+ port=3306
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-mysql-init-scripts
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: primary
+data:
+ create_users_grants_core.sql: |-
+ create user zyly@'%' identified by 'Cmii@451315';
+ grant select on *.* to zyly@'%';
+ create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
+ grant all on *.* to zyly_qc@'%';
+ create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
+ grant all on *.* to k8s_admin@'%';
+ create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
+ grant all on *.* to audit_dba@'%';
+ create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
+ GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
+ create user monitor@'%' identified by 'PL3#nGtrWbf-';
+ grant REPLICATION CLIENT on *.* to monitor@'%';
+ flush privileges;
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: cmii-mysql
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ cmii.app: mysql
+ cmii.type: middleware
+ octopus.control: mysql-db-wdd
+spec:
+ ports:
+ - name: mysql
+ protocol: TCP
+ port: 13306
+ targetPort: mysql
+ selector:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ cmii.app: mysql
+ cmii.type: middleware
+ type: ClusterIP
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mysql-headless
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ annotations: {}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: true
+ ports:
+ - name: mysql
+ port: 3306
+ targetPort: mysql
+ selector:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mysql
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ annotations: {}
+spec:
+ type: NodePort
+ ports:
+ - name: mysql
+ port: 3306
+ protocol: TCP
+ targetPort: mysql
+ nodePort: 33306
+ selector:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-mysql
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ serviceName: helm-mysql
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ annotations:
+ checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-mysql
+ affinity: {}
+ nodeSelector:
+ mysql-deploy: "true"
+ securityContext:
+ fsGroup: 1001
+ initContainers:
+ - name: change-volume-permissions
+ image: 10.22.57.8:8033/cmii/bitnami-shell:11-debian-11-r136
+ imagePullPolicy: "Always"
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ chown -R 1001:1001 /bitnami/mysql
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - name: mysql-data
+ mountPath: /bitnami/mysql
+ containers:
+ - name: mysql
+ image: 10.22.57.8:8033/cmii/mysql:8.1.0-debian-11-r42
+ imagePullPolicy: "IfNotPresent"
+ securityContext:
+ runAsUser: 1001
+ env:
+ - name: BITNAMI_DEBUG
+ value: "true"
+ - name: MYSQL_ROOT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-mysql
+ key: mysql-root-password
+ - name: MYSQL_DATABASE
+ value: "cmii"
+ ports:
+ - name: mysql
+ containerPort: 3306
+ livenessProbe:
+ failureThreshold: 5
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ readinessProbe:
+ failureThreshold: 5
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ startupProbe:
+ failureThreshold: 60
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: mysql-data
+ mountPath: /bitnami/mysql
+ - name: custom-init-scripts
+ mountPath: /docker-entrypoint-initdb.d
+ - name: config
+ mountPath: /opt/bitnami/mysql/conf/my.cnf
+ subPath: my.cnf
+ volumes:
+ - name: config
+ configMap:
+ name: helm-mysql
+ - name: custom-init-scripts
+ configMap:
+ name: helm-mysql-init-scripts
+ - name: mysql-data
+ hostPath:
+ path: /var/lib/docker/mysql-pv/xa-dcity-uas-260116/
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-nacos.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-nacos.yaml
new file mode 100644
index 0000000..cb6c211
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-nacos.yaml
@@ -0,0 +1,130 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-nacos-cm
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: uas-2.2
+data:
+ mysql.db.name: "cmii_nacos_config"
+ mysql.db.host: "helm-mysql"
+ mysql.port: "3306"
+ mysql.user: "k8s_admin"
+ mysql.password: "fP#UaH6qQ3)8"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-nacos
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: uas-2.2
+spec:
+ type: NodePort
+ selector:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ ports:
+ - port: 8848
+ name: server
+ targetPort: 8848
+ nodePort: 38848
+ - port: 9848
+ name: server12
+ targetPort: 9848
+ - port: 9849
+ name: server23
+ targetPort: 9849
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-nacos
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: uas-2.2
+spec:
+ serviceName: helm-nacos
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ template:
+ metadata:
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/version: uas-2.2
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ containers:
+ - name: nacos-server
+ image: 10.22.57.8:8033/cmii/nacos-server:v2.1.2
+ ports:
+ - containerPort: 8848
+ name: dashboard
+ - containerPort: 9848
+ name: tcp-9848
+ - containerPort: 9849
+ name: tcp-9849
+ env:
+ - name: NACOS_AUTH_ENABLE
+ value: "false"
+ - name: NACOS_REPLICAS
+ value: "1"
+ - name: MYSQL_SERVICE_DB_NAME
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.db.name
+ - name: MYSQL_SERVICE_PORT
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.port
+ - name: MYSQL_SERVICE_USER
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.user
+ - name: MYSQL_SERVICE_PASSWORD
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.password
+ - name: MYSQL_SERVICE_HOST
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.db.host
+ - name: NACOS_SERVER_PORT
+ value: "8848"
+ - name: NACOS_APPLICATION_PORT
+ value: "8848"
+ - name: PREFER_HOST_MODE
+ value: "hostname"
+ - name: MODE
+ value: standalone
+ - name: SPRING_DATASOURCE_PLATFORM
+ value: mysql
+---
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-nfs-test.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-nfs-test.yaml
new file mode 100644
index 0000000..2172684
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-nfs-test.yaml
@@ -0,0 +1,38 @@
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: test-claim
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" #与nfs-StorageClass.yaml metadata.name保持一致
+spec:
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: nfs-prod-distribute
+ resources:
+ requests:
+ storage: 1Mi
+---
+kind: Pod
+apiVersion: v1
+metadata:
+ name: test-pod
+spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: test-pod
+ image: 10.22.57.8:8033/cmii/busybox:latest
+ command:
+ - "/bin/sh"
+ args:
+ - "-c"
+ - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
+ volumeMounts:
+ - name: nfs-pvc
+ mountPath: "/mnt"
+ restartPolicy: "Never"
+ volumes:
+ - name: nfs-pvc
+ persistentVolumeClaim:
+ claimName: test-claim #与PVC名称保持一致
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-nfs.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-nfs.yaml
new file mode 100644
index 0000000..2c3f4d0
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-nfs.yaml
@@ -0,0 +1,114 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system #根据实际环境设定namespace,下面类同
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: nfs-client-provisioner-runner
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: run-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+# name: nfs-client-provisioner-runner
+ name: cluster-admin
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: Role
+ name: leader-locking-nfs-client-provisioner
+ apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: nfs-prod-distribute
+provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var of the provisioner deployment. NOTE(review): 'parameters: archiveOnDelete: "false"' appears fused into this comment -- if archive-on-delete behavior was intended, restore it as a real top-level parameters block
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nfs-client-provisioner
+ labels:
+ app: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system #与RBAC文件中的namespace保持一致
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nfs-client-provisioner
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ app: nfs-client-provisioner
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: nfs-client-provisioner
+ containers:
+ - name: nfs-client-provisioner
+ image: 10.22.57.8:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
+ volumeMounts:
+ - name: nfs-client-root
+ mountPath: /persistentvolumes
+ env:
+ - name: PROVISIONER_NAME
+ value: cmlc-nfs-storage
+ - name: NFS_SERVER
+ value: 10.22.57.4
+ - name: NFS_PATH
+ value: /var/lib/docker/nfs_data
+ volumes:
+ - name: nfs-client-root
+ nfs:
+ server: 10.22.57.4
+ path: /var/lib/docker/nfs_data
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-pvc.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-pvc.yaml
new file mode 100644
index 0000000..5214687
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-pvc.yaml
@@ -0,0 +1,76 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: nfs-backend-log-pvc
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: middleware-base
+ cmii.app: nfs-backend-log-pvc
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: uas-2.2
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 100Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-emqxs
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-emqxs
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: uas-2.2
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 20Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-mongo
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-mongo
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: uas-2.2
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 30Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-rabbitmq
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-rabbitmq
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: uas-2.2
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 20Gi
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-pyfusion-configmap.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-pyfusion-configmap.yaml
new file mode 100644
index 0000000..a8257b9
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-pyfusion-configmap.yaml
@@ -0,0 +1,16 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: pyfusion-configmap
+ namespace: xa-dcity-uas-260116
+data:
+ config.yaml: |-
+ mqtt:
+ broker: "helm-emqxs"
+ port: 1883
+ username: "cmlc"
+ password: "odD8#Ve7.B"
+
+ topics:
+ mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
+ sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-rabbitmq.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-rabbitmq.yaml
new file mode 100644
index 0000000..6d21c30
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-rabbitmq.yaml
@@ -0,0 +1,328 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-rabbitmq
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: rabbitmq
+automountServiceAccountToken: true
+secrets:
+ - name: helm-rabbitmq
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-rabbitmq
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: rabbitmq
+type: Opaque
+data:
+ rabbitmq-password: "blljUk45MXIuX2hq"
+ rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-rabbitmq-config
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: rabbitmq
+data:
+ rabbitmq.conf: |-
+ ## Username and password
+ ##
+ default_user = admin
+ default_pass = nYcRN91r._hj
+ ## Clustering
+ ##
+ cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
+ cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
+ cluster_formation.node_cleanup.interval = 10
+ cluster_formation.node_cleanup.only_log_warning = true
+ cluster_partition_handling = autoheal
+ # queue master locator
+ queue_master_locator = min-masters
+ # enable guest user
+ loopback_users.guest = false
+ #default_vhost = default-vhost
+ #disk_free_limit.absolute = 50MB
+ #load_definitions = /app/load_definition.json
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-rabbitmq-endpoint-reader
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: rabbitmq
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-rabbitmq-endpoint-reader
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: rabbitmq
+subjects:
+ - kind: ServiceAccount
+ name: helm-rabbitmq
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: helm-rabbitmq-endpoint-reader
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-rabbitmq-headless
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ clusterIP: None
+ ports:
+ - name: epmd
+ port: 4369
+ targetPort: epmd
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+ - name: dist
+ port: 25672
+ targetPort: dist
+ - name: dashboard
+ port: 15672
+      targetPort: dashboard  # container port is named "dashboard" (no "stats" port exists in the StatefulSet)
+ selector:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ publishNotReadyAddresses: true
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-rabbitmq
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ type: NodePort
+ ports:
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+ nodePort: 35672
+ - name: dashboard
+ port: 15672
+ targetPort: dashboard
+ nodePort: 36675
+ selector:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: xa-dcity-uas-260116
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-rabbitmq
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ serviceName: helm-rabbitmq-headless
+ podManagementPolicy: OrderedReady
+ replicas: 1
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: rabbitmq
+ annotations:
+ checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
+ checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-rabbitmq
+ affinity: {}
+ securityContext:
+ fsGroup: 5001
+ runAsUser: 5001
+ terminationGracePeriodSeconds: 120
+ initContainers:
+ - name: volume-permissions
+ image: 10.22.57.8:8033/cmii/bitnami-shell:11-debian-11-r136
+ imagePullPolicy: "Always"
+ command:
+ - /bin/bash
+ args:
+ - -ec
+ - |
+ mkdir -p "/bitnami/rabbitmq/mnesia"
+ chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
+ securityContext:
+ runAsUser: 0
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: data
+ mountPath: /bitnami/rabbitmq/mnesia
+ containers:
+ - name: rabbitmq
+ image: 10.22.57.8:8033/cmii/rabbitmq:3.9.12-debian-10-r3
+ imagePullPolicy: "Always"
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: K8S_SERVICE_NAME
+ value: "helm-rabbitmq-headless"
+ - name: K8S_ADDRESS_TYPE
+ value: hostname
+ - name: RABBITMQ_FORCE_BOOT
+ value: "no"
+ - name: RABBITMQ_NODE_NAME
+ value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+ - name: K8S_HOSTNAME_SUFFIX
+ value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+ - name: RABBITMQ_MNESIA_DIR
+ value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
+ - name: RABBITMQ_LDAP_ENABLE
+ value: "no"
+ - name: RABBITMQ_LOGS
+ value: "-"
+ - name: RABBITMQ_ULIMIT_NOFILES
+ value: "65536"
+ - name: RABBITMQ_USE_LONGNAME
+ value: "true"
+ - name: RABBITMQ_ERL_COOKIE
+ valueFrom:
+ secretKeyRef:
+ name: helm-rabbitmq
+ key: rabbitmq-erlang-cookie
+ - name: RABBITMQ_LOAD_DEFINITIONS
+ value: "no"
+ - name: RABBITMQ_SECURE_PASSWORD
+ value: "yes"
+ - name: RABBITMQ_USERNAME
+ value: "admin"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-rabbitmq
+ key: rabbitmq-password
+ - name: RABBITMQ_PLUGINS
+ value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
+ ports:
+ - name: amqp
+ containerPort: 5672
+ - name: dist
+ containerPort: 25672
+ - name: dashboard
+ containerPort: 15672
+ - name: epmd
+ containerPort: 4369
+ livenessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q ping
+ initialDelaySeconds: 120
+ periodSeconds: 30
+ timeoutSeconds: 20
+ successThreshold: 1
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
+ /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
+ else
+ rabbitmqctl stop_app
+ fi
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: configuration
+ mountPath: /bitnami/rabbitmq/conf
+ - name: data
+ mountPath: /bitnami/rabbitmq/mnesia
+ volumes:
+ - name: configuration
+ configMap:
+ name: helm-rabbitmq-config
+ items:
+ - key: rabbitmq.conf
+ path: rabbitmq.conf
+ - name: data
+ persistentVolumeClaim:
+ claimName: helm-rabbitmq
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-redis.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-redis.yaml
new file mode 100644
index 0000000..390b55e
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-redis.yaml
@@ -0,0 +1,585 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+automountServiceAccountToken: true
+metadata:
+ name: helm-redis
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-redis
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+type: Opaque
+data:
+ redis-password: "TWNhY2hlQDQ1MjI="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-configuration
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+data:
+ redis.conf: |-
+ # User-supplied common configuration:
+ # Enable AOF https://redis.io/topics/persistence#append-only-file
+ appendonly yes
+ # Disable RDB persistence, AOF persistence already enabled.
+ save ""
+ # End of common configuration
+ master.conf: |-
+ dir /data
+ # User-supplied master configuration:
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+ # End of master configuration
+ replica.conf: |-
+ dir /data
+ slave-read-only yes
+ # User-supplied replica configuration:
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+ # End of replica configuration
+---
+# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-health
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+data:
+ ping_readiness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+ ping_liveness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+---
+# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-scripts
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+data:
+ start-master.sh: |
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
+ exec redis-server "${ARGS[@]}"
+ start-replica.sh: |
+ #!/bin/bash
+
+ get_port() {
+ hostname="$1"
+ type="$2"
+
+ port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
+ port=${!port_var}
+
+ if [ -z "$port" ]; then
+ case $type in
+ "SENTINEL")
+ echo 26379
+ ;;
+ "REDIS")
+ echo 6379
+ ;;
+ esac
+ else
+ echo $port
+ fi
+ }
+
+ get_full_hostname() {
+ hostname="$1"
+ echo "${hostname}.${HEADLESS_SERVICE}"
+ }
+
+ REDISPORT=$(get_port "$HOSTNAME" "REDIS")
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+
+ echo "" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
+ exec redis-server "${ARGS[@]}"
+---
+# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-headless
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: xa-dcity-uas-260116
+---
+# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-master
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+spec:
+ type: ClusterIP
+
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ nodePort: null
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+---
+# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-replicas
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+spec:
+ type: ClusterIP
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ nodePort: null
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/component: replica
+---
+# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-redis-master
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+ serviceName: helm-redis-headless
+ updateStrategy:
+ rollingUpdate: {}
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ spec:
+ affinity: {}
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: helm-redis
+ imagePullSecrets:
+ - name: harborsecret
+ terminationGracePeriodSeconds: 30
+ containers:
+ - name: redis
+ image: 10.22.57.8:8033/cmii/redis:6.2.14-debian-11-r1
+ imagePullPolicy: "Always"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-master.sh
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: master
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ # One second longer than command timeout should prevent generation of zombie processes.
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local.sh 5
+ readinessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 2
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local.sh 1
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: "2"
+ memory: 8Gi
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ - name: redis-data
+ mountPath: /data
+          subPath: ""
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc/
+ - name: tmp
+ mountPath: /tmp
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: helm-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: helm-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: helm-redis-configuration
+ - name: redis-tmp-conf
+ emptyDir: {}
+ - name: tmp
+ emptyDir: {}
+ - name: redis-data
+ emptyDir: {}
+---
+# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-redis-replicas
+ namespace: xa-dcity-uas-260116
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+spec:
+ replicas: 0
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/component: replica
+ serviceName: helm-redis-headless
+ updateStrategy:
+ rollingUpdate: {}
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xa-dcity-uas-260116
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: helm-redis
+ terminationGracePeriodSeconds: 30
+ containers:
+ - name: redis
+ image: 10.22.57.8:8033/cmii/redis:6.2.14-debian-11-r1
+ imagePullPolicy: "Always"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-replica.sh
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: slave
+ - name: REDIS_MASTER_HOST
+ value: helm-redis-master-0.helm-redis-headless.xa-dcity-uas-260116.svc.cluster.local
+ - name: REDIS_MASTER_PORT_NUMBER
+ value: "6379"
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_MASTER_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local_and_master.sh 5
+ readinessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 2
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local_and_master.sh 1
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: "2"
+ memory: 8Gi
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ - name: redis-data
+ mountPath: /data
+          subPath: ""
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: helm-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: helm-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: helm-redis-configuration
+ - name: redis-tmp-conf
+ emptyDir: {}
+ - name: redis-data
+ emptyDir: {}
+
diff --git a/71-202601-XA监管平台/k8s-yaml/k8s-srs.yaml b/71-202601-XA监管平台/k8s-yaml/k8s-srs.yaml
new file mode 100644
index 0000000..f447137
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/k8s-srs.yaml
@@ -0,0 +1,496 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: helm-live-srs-cm
+ namespace: xa-dcity-uas-260116
+ labels:
+ cmii.app: live-srs
+ cmii.type: live
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ helm.sh/chart: cmlc-live-srs-rtc-2.0.0
+data:
+ srs.rtc.conf: |-
+ listen 31935;
+ max_connections 4096;
+ srs_log_tank console;
+ srs_log_level info;
+ srs_log_file /home/srs.log;
+ daemon off;
+ http_api {
+ enabled on;
+ listen 1985;
+ crossdomain on;
+ }
+ stats {
+ network 0;
+ }
+ http_server {
+ enabled on;
+ listen 8080;
+ dir /home/hls;
+ }
+ srt_server {
+ enabled on;
+ listen 30556;
+ maxbw 1000000000;
+ connect_timeout 4000;
+ peerlatency 600;
+ recvlatency 600;
+ }
+ rtc_server {
+ enabled on;
+ listen 30090;
+ candidate $CANDIDATE;
+ }
+ vhost __defaultVhost__ {
+ http_hooks {
+ enabled on;
+ on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
+ }
+ http_remux {
+ enabled on;
+ }
+ rtc {
+ enabled on;
+ rtmp_to_rtc on;
+ rtc_to_rtmp on;
+ keep_bframe off;
+ }
+ tcp_nodelay on;
+ min_latency on;
+ play {
+ gop_cache off;
+ mw_latency 100;
+ mw_msgs 10;
+ }
+ publish {
+ firstpkt_timeout 8000;
+ normal_timeout 4000;
+ mr on;
+ }
+ dvr {
+ enabled off;
+ dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
+ dvr_plan session;
+ }
+ hls {
+ enabled on;
+ hls_path /home/hls;
+ hls_fragment 10;
+ hls_window 60;
+ hls_m3u8_file [app]/[stream].m3u8;
+ hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
+ hls_cleanup on;
+ hls_entry_prefix http://144.7.97.167:8088;
+ }
+ }
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srs-svc-exporter
+ namespace: xa-dcity-uas-260116
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: rtmp
+ protocol: TCP
+ port: 31935
+ targetPort: 31935
+ nodePort: 31935
+ - name: rtc
+ protocol: UDP
+ port: 30090
+ targetPort: 30090
+ nodePort: 30090
+ - name: rtc-tcp
+ protocol: TCP
+ port: 30090
+ targetPort: 30090
+ nodePort: 30090
+ - name: srt
+ protocol: UDP
+ port: 30556
+ targetPort: 30556
+ nodePort: 30556
+ - name: api
+ protocol: TCP
+ port: 1985
+ targetPort: 1985
+ nodePort: 30080
+ selector:
+ srs-role: rtc
+ type: NodePort
+ sessionAffinity: None
+ externalTrafficPolicy: Cluster
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srs-svc
+ namespace: xa-dcity-uas-260116
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8080
+ targetPort: 8080
+ - name: api
+ protocol: TCP
+ port: 1985
+ targetPort: 1985
+ selector:
+ srs-role: rtc
+ type: ClusterIP
+ sessionAffinity: None
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srsrtc-svc
+ namespace: xa-dcity-uas-260116
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: rtmp
+ protocol: TCP
+ port: 31935
+ targetPort: 31935
+ selector:
+ srs-role: rtc
+ type: ClusterIP
+ sessionAffinity: None
+
+---
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: helm-live-srs-rtc
+ namespace: xa-dcity-uas-260116
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-srs
+ cmii.type: live
+ helm.sh/chart: cmlc-live-srs-rtc-2.0.0
+ srs-role: rtc
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ srs-role: rtc
+ template:
+ metadata:
+ labels:
+ srs-role: rtc
+ spec:
+ volumes:
+ - name: srs-conf-file
+ configMap:
+ name: helm-live-srs-cm
+ items:
+ - key: srs.rtc.conf
+ path: docker.conf
+ defaultMode: 420
+ - name: srs-vol
+ emptyDir:
+ sizeLimit: 8Gi
+ containers:
+ - name: srs-rtc
+ image: 10.22.57.8:8033/cmii/srs:v5.0.195
+ ports:
+ - name: srs-rtmp
+ containerPort: 31935
+ protocol: TCP
+ - name: srs-api
+ containerPort: 1985
+ protocol: TCP
+ - name: srs-flv
+ containerPort: 8080
+ protocol: TCP
+ - name: srs-webrtc
+ containerPort: 30090
+ protocol: UDP
+ - name: srs-webrtc-tcp
+ containerPort: 30090
+ protocol: TCP
+ - name: srs-srt
+ containerPort: 30556
+ protocol: UDP
+ env:
+ - name: CANDIDATE
+ value: 144.7.97.167
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-conf-file
+ mountPath: /usr/local/srs/conf/docker.conf
+ subPath: docker.conf
+ - name: srs-vol
+ mountPath: /home/dvr
+ subPath: xa-dcity-uas-260116/helm-live/dvr
+ - name: srs-vol
+ mountPath: /home/hls
+ subPath: xa-dcity-uas-260116/helm-live/hls
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ - name: oss-adaptor
+ image: 10.22.57.8:8033/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
+ env:
+ - name: OSS_ENDPOINT
+ value: 'http://helm-minio:9000'
+ - name: OSS_AK
+ value: cmii
+ - name: OSS_SK
+ value: 'B#923fC7mk'
+ - name: OSS_BUCKET
+ value: live-cluster-hls
+ - name: SRS_OP
+ value: 'http://helm-live-op-svc-v2:8080'
+ - name: MYSQL_ENDPOINT
+ value: 'helm-mysql:3306'
+ - name: MYSQL_USERNAME
+ value: k8s_admin
+ - name: MYSQL_PASSWORD
+ value: fP#UaH6qQ3)8
+ - name: MYSQL_DATABASE
+ value: cmii_live_srs_op
+ - name: MYSQL_TABLE
+ value: live_segment
+ - name: LOG_LEVEL
+ value: info
+ - name: OSS_META
+ value: 'yes'
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-vol
+ mountPath: /cmii/share/hls
+ subPath: xa-dcity-uas-260116/helm-live/hls
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ schedulerName: default-scheduler
+ serviceName: helm-live-srsrtc-svc
+ podManagementPolicy: OrderedReady
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ partition: 0
+ revisionHistoryLimit: 10
+---
+# live-srs部分
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: helm-live-op-v2
+ namespace: xa-dcity-uas-260116
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-engine
+ cmii.type: live
+ helm.sh/chart: cmlc-live-live-op-2.0.0
+ live-role: op-v2
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ live-role: op-v2
+ template:
+ metadata:
+ labels:
+ live-role: op-v2
+ spec:
+ volumes:
+ - name: srs-conf-file
+ configMap:
+ name: helm-live-op-cm-v2
+ items:
+ - key: live.op.conf
+ path: bootstrap.yaml
+ defaultMode: 420
+ containers:
+ - name: helm-live-op-v2
+ image: 10.22.57.8:8033/cmii/cmii-live-operator:5.2.0
+ ports:
+ - name: operator
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 4800m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-conf-file
+ mountPath: /cmii/bootstrap.yaml
+ subPath: bootstrap.yaml
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ schedulerName: default-scheduler
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 25%
+ maxSurge: 25%
+ revisionHistoryLimit: 10
+ progressDeadlineSeconds: 600
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-op-svc-v2
+ namespace: xa-dcity-uas-260116
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ nodePort: 30333
+ selector:
+ live-role: op-v2
+ type: NodePort
+ sessionAffinity: None
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-op-svc
+ namespace: xa-dcity-uas-260116
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ live-role: op
+ type: ClusterIP
+ sessionAffinity: None
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: helm-live-op-cm-v2
+ namespace: xa-dcity-uas-260116
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-engine
+ cmii.type: live
+data:
+ live.op.conf: |-
+ server:
+ port: 8080
+ spring:
+ main:
+ allow-bean-definition-overriding: true
+ allow-circular-references: true
+ application:
+ name: cmii-live-operator
+ platform:
+ info:
+ name: cmii-live-operator
+ description: cmii-live-operator
+ version: uas-2.2
+ scanPackage: com.cmii.live.op
+ cloud:
+ nacos:
+ config:
+ username: nacos
+ password: KingKong@95461234
+ server-addr: helm-nacos:8848
+ extension-configs:
+ - data-id: cmii-live-operator.yml
+ group: uas-2.2
+ refresh: true
+ shared-configs:
+ - data-id: cmii-backend-system.yml
+ group: uas-2.2
+ refresh: true
+ discovery:
+ enabled: false
+
+ live:
+ engine:
+ type: srs
+ endpoint: 'http://helm-live-srs-svc:1985'
+ proto:
+ rtmp: 'rtmp://144.7.97.167:31935'
+ rtsp: 'rtsp://144.7.97.167:30554'
+ srt: 'srt://144.7.97.167:30556'
+ flv: 'http://144.7.97.167:30500'
+ hls: 'http://144.7.97.167:30500'
+ rtc: 'webrtc://144.7.97.167:30080'
+ replay: 'https://144.7.97.167:30333'
+ minio:
+ endpoint: http://helm-minio:9000
+ access-key: cmii
+ secret-key: B#923fC7mk
+ bucket: live-cluster-hls
diff --git a/71-202601-XA监管平台/k8s-yaml/x_minio初始化.sh b/71-202601-XA监管平台/k8s-yaml/x_minio初始化.sh
new file mode 100644
index 0000000..71e90ef
--- /dev/null
+++ b/71-202601-XA监管平台/k8s-yaml/x_minio初始化.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+export tenant_name=outside
+export inner_master_ip=helm-rabbitmq
+export minio_host_ip=10.22.57.8
+
+mc alias set ${tenant_name} http://${minio_host_ip}:39000 cmii B#923fC7mk
+
+
+mc mb ${tenant_name}/jadenq ${tenant_name}/tus ${tenant_name}/thumbnail ${tenant_name}/pub-cms ${tenant_name}/live-srs-hls/ ${tenant_name}/mission/ ${tenant_name}/surveillance ${tenant_name}/playback ${tenant_name}/tower ${tenant_name}/modelprocess ${tenant_name}/srs-hls ${tenant_name}/live-cluster-hls ${tenant_name}/geodata ${tenant_name}/ilm-detect ${tenant_name}/ilm-geodata
+echo ""
+
+echo "set rabbit mq"
+mc admin config set ${tenant_name} notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@${inner_master_ip}:5672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"
+echo ""
+
+echo "sleep 5 s!"
+sleep 5
+
+mc admin service restart ${tenant_name}
+
+echo "sleep 5 s!"
+sleep 5
+echo ""
+
+
+echo "start to add event notification !"
+
+mc event add ${tenant_name}/mission arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/modelprocess arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/live-srs-hls arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/playback arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/live-cluster-hls arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/geodata arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/surveillance arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/ilm-detect arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/ilm-geodata arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/tus arn:minio:sqs::1:amqp --event delete
+
+mc ilm add --expiry-days "1" ${tenant_name}/tus
+
+echo ""
+echo "done of init !"
\ No newline at end of file
diff --git a/71-202601-XA监管平台/rke-13014-cluster.yml b/71-202601-XA监管平台/rke-13014-cluster.yml
new file mode 100644
index 0000000..3c019d0
--- /dev/null
+++ b/71-202601-XA监管平台/rke-13014-cluster.yml
@@ -0,0 +1,258 @@
+nodes:
+ - address: 10.22.57.8
+ user: root
+ role:
+ - controlplane
+ - etcd
+ - worker
+ internal_address: 10.22.57.8
+ hostname_override: "master-10-22-57-8"
+ labels:
+      ingress-deploy: "true"
+ uavcloud.env: demo
+ - address: 10.22.57.5
+ user: root
+ role:
+ - worker
+ internal_address: 10.22.57.5
+ hostname_override: "worker-1-10-22-57-5"
+ labels:
+ uavcloud.env: demo
+ - address: 10.22.57.6
+ user: root
+ role:
+ - worker
+ internal_address: 10.22.57.6
+ hostname_override: "worker-2-10-22-57-6"
+ labels:
+ uavcloud.env: demo
+ - address: 10.22.57.7
+ user: root
+ role:
+ - worker
+ internal_address: 10.22.57.7
+ hostname_override: "worker-3-10-22-57-7"
+ labels:
+ uavcloud.env: demo
+ - address: 10.22.57.3
+ user: root
+ role:
+ - worker
+ internal_address: 10.22.57.3
+ hostname_override: "worker-4-10-22-57-3"
+ labels:
+      mysql-deploy: "true"
+ - address: 10.22.57.4
+ user: root
+ role:
+ - worker
+ internal_address: 10.22.57.4
+ hostname_override: "worker-5-10-22-57-4"
+ labels:
+      minio-deploy: "true"
+      doris-deploy: "true"
+
+authentication:
+ strategy: x509
+ sans:
+ - "10.22.57.8"
+
+private_registries:
+ - url: 10.22.57.8:8033 # 私有镜像库地址
+ user: admin
+ password: "V2ryStr@ngPss"
+ is_default: true
+
+##############################################################################
+
+# 默认值为false,如果设置为true,当发现不支持的Docker版本时,RKE不会报错
+ignore_docker_version: true
+
+# Set the name of the Kubernetes cluster
+cluster_name: rke-cluster
+
+kubernetes_version: v1.30.14-rancher1-1
+
+ssh_key_path: /root/.ssh/id_ed25519
+
+# Enable running cri-dockerd
+# Up to Kubernetes 1.23, kubelet contained code called dockershim
+# to support Docker runtime. The replacement is called cri-dockerd
+# and should be enabled if you want to keep using Docker as your
+# container runtime
+# Only available to enable in Kubernetes 1.21 and higher
+enable_cri_dockerd: true
+
+services:
+ etcd:
+ backup_config:
+ enabled: false
+ interval_hours: 72
+ retention: 3
+ safe_timestamp: false
+ timeout: 300
+ creation: 12h
+ extra_args:
+ election-timeout: 5000
+ heartbeat-interval: 500
+ cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
+ gid: 0
+ retention: 72h
+ snapshot: false
+ uid: 0
+
+ kube-api:
+ # IP range for any services created on Kubernetes
+ # This must match the service_cluster_ip_range in kube-controller
+ service_cluster_ip_range: 10.74.0.0/16
+ # Expose a different port range for NodePort services
+ service_node_port_range: 30000-40000
+ always_pull_images: false
+ pod_security_policy: false
+ # Add additional arguments to the kubernetes API server
+ # This WILL OVERRIDE any existing defaults
+ extra_args:
+ # Enable audit log to stdout
+ audit-log-path: "-"
+ # Increase number of delete workers
+ delete-collection-workers: 3
+ # Set the level of log output to warning-level
+ v: 1
+ kube-controller:
+ # CIDR pool used to assign IP addresses to pods in the cluster
+ cluster_cidr: 10.100.0.0/16
+ # IP range for any services created on Kubernetes
+ # This must match the service_cluster_ip_range in kube-api
+ service_cluster_ip_range: 10.74.0.0/16
+ # Add additional arguments to the kubernetes API server
+ # This WILL OVERRIDE any existing defaults
+ extra_args:
+      # Set log verbosity to 1 (low-noise)
+ v: 1
+ # Enable RotateKubeletServerCertificate feature gate
+ feature-gates: RotateKubeletServerCertificate=true
+ # Enable TLS Certificates management
+ # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
+ cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
+ cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
+ kubelet:
+ # Base domain for the cluster
+ cluster_domain: cluster.local
+ # IP address for the DNS service endpoint
+ cluster_dns_server: 10.74.0.10
+ # Fail if swap is on
+ fail_swap_on: false
+    # Raise the per-node pod limit above the default of 110
+ extra_binds:
+ - "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
+ extra_args:
+ max-pods: 122
+ # Optionally define additional volume binds to a service
+ scheduler:
+ extra_args:
+ # Set the level of log output to warning-level
+ v: 0
+ tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
+ kubeproxy:
+ extra_args:
+ # Set the level of log output to warning-level
+ v: 1
+
+authorization:
+ mode: rbac
+
+addon_job_timeout: 30
+
+# Specify network plugin-in (canal, calico, flannel, weave, or none)
+network:
+ mtu: 1440
+ options:
+ flannel_backend_type: vxlan
+ plugin: calico
+ tolerations:
+ - key: "node.kubernetes.io/unreachable"
+ operator: "Exists"
+ effect: "NoExecute"
+ tolerationseconds: 300
+ - key: "node.kubernetes.io/not-ready"
+ operator: "Exists"
+ effect: "NoExecute"
+ tolerationseconds: 300
+
+# Specify DNS provider (coredns or kube-dns)
+dns:
+ provider: coredns
+ nodelocal: {}
+ # Available as of v1.1.0
+ update_strategy:
+ strategy: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 20%
+ maxSurge: 15%
+ linear_autoscaler_params:
+ cores_per_replica: 0.34
+ nodes_per_replica: 4
+ prevent_single_point_failure: true
+ min: 2
+ max: 3
+ tolerations:
+ - key: "node.kubernetes.io/unreachable"
+ operator: "Exists"
+ effect: "NoExecute"
+ tolerationseconds: 300
+ - key: "node.kubernetes.io/not-ready"
+ operator: "Exists"
+ effect: "NoExecute"
+ tolerationseconds: 300
+
+# Specify monitoring provider (metrics-server)
+monitoring:
+ provider: metrics-server
+ # Available as of v1.1.0
+ update_strategy:
+ strategy: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 8
+
+ingress:
+ provider: nginx
+ default_backend: true
+ http_port: 30500
+ https_port: 31500
+ extra_envs:
+ - name: TZ
+ value: Asia/Shanghai
+ node_selector:
+    ingress-deploy: "true"
+ options:
+ use-forwarded-headers: "true"
+ access-log-path: /var/log/nginx/access.log
+ client-body-timeout: '6000'
+ compute-full-forwarded-for: 'true'
+ enable-underscores-in-headers: 'true'
+ log-format-escape-json: 'true'
+ log-format-upstream: >-
+ { "msec": "$msec", "connection": "$connection", "connection_requests":
+ "$connection_requests", "pid": "$pid", "request_id": "$request_id",
+ "request_length": "$request_length", "remote_addr": "$remote_addr",
+ "remote_user": "$remote_user", "remote_port": "$remote_port",
+ "http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
+ "$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
+ "request_uri": "$request_uri", "args": "$args", "status": "$status",
+ "body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
+ "http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
+ "http_host": "$http_host", "server_name": "$server_name", "request_time":
+ "$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
+ "$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
+ "upstream_response_time": "$upstream_response_time",
+ "upstream_response_length": "$upstream_response_length",
+ "upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
+ "$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
+ "request_method": "$request_method", "server_protocol": "$server_protocol",
+ "pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
+ "geoip_country_code": "$geoip_country_code" }
+ proxy-body-size: 5120m
+ proxy-read-timeout: '6000'
+ proxy-send-timeout: '6000'
+
+
diff --git a/72-202602-绵阳飞服/1-批量脚本.sh b/72-202602-绵阳飞服/1-批量脚本.sh
new file mode 100644
index 0000000..4b647b0
--- /dev/null
+++ b/72-202602-绵阳飞服/1-批量脚本.sh
@@ -0,0 +1,60 @@
+
+192.168.1.4
+
+
+移动云:
+MYDKFX2026
+
+wrbvGO57
+
+,您的客户经理沈玉雪(联系方式:18380551056)
+2、公网IP地址:36.133.66.183
+3、服务器密码都是:v6w6apLf7=
+
+## 192.168.1.6 2TB
+
+mv agent-wdd_linux_amd64 /usr/local/bin/agent-wdd
+chmod +x /usr/local/bin/agent-wdd
+
+# 主节点安装ssh-key
+/usr/local/bin/agent-wdd base ssh config
+/usr/local/bin/agent-wdd base ssh key
+
+DEFAULT_HTTP_BACKEND_IP=$(kubectl -n ingress-nginx get svc default-http-backend -o jsonpath='{.spec.clusterIP}')
+
+# 批量执行命令
+host_list=(
+ 192.168.1.3
+ 192.168.1.5
+ 192.168.1.2
+ 192.168.1.6
+)
+
+for server in "${host_list[@]}";do
+ echo " ---> current ip is $server - $(hostname) \n"
+
+ ssh root@"$server" "DEFAULT_HTTP_BACKEND_IP='$DEFAULT_HTTP_BACKEND_IP' bash -s" < ./remote-node-setup.sh; done  # FIXME(review): this line and the loop's 'done' were garbled in the patch; verify the redirected script path
+ssh root@${server} "printf '%s\n' \
+'Acquire::http::Proxy \"http://10.22.57.8:3142\";' \
+'Acquire::https::Proxy \"http://10.22.57.8:3142\";' \
+| tee /etc/apt/apt.conf.d/01proxy >/dev/null"
+ssh root@${server} "apt-get update"
+ssh root@${server} "apt-get install -y gparted"
+
+
+apt-get install -y docker.io=20.10.12-0ubuntu4 containerd=1.7.28-0ubuntu1~22.04.1 docker-buildx=0.20.1-0ubuntu1~22.04.2 docker-compose=1.29.2-1
\ No newline at end of file
diff --git a/72-202602-绵阳飞服/ImageSyncDLTU.sh b/72-202602-绵阳飞服/ImageSyncDLTU.sh
new file mode 100644
index 0000000..d7a21cb
--- /dev/null
+++ b/72-202602-绵阳飞服/ImageSyncDLTU.sh
@@ -0,0 +1,168 @@
+#!/bin/bash
+
+all_image_list_txt="all-cmii-image-list.txt" # 需要修改版本
+gzip_image_list_txt="all-gzip-image-list.txt" # 一般不需要修改
+oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
+local_gzip_path="/root/octopus-image"
+
+DockerRegisterDomain="192.168.1.4:8033" # 需要根据实际修改
+HarborAdminPass=V2ryStr@ngPss # 需要跟第一脚本中的密码保持一致
+
+print_green() {
+ echo -e "\033[32m${1}\033[0m"
+ echo ""
+}
+
+print_red() {
+ echo -e "\033[31m${1}\033[0m"
+ echo ""
+}
+
+Download_Load_Tag_Upload() {
+ print_green "[DLTU] - start !"
+ while [[ $# -gt 0 ]]; do
+ case "$1" in
+ rke)
+ # print_green "download rke "
+ local_gzip_path="$local_gzip_path/rke13014"
+ mkdir -p ${local_gzip_path}
+ oss_prefix_url="$oss_prefix_url/rke13014/"
+ dltu
+ shift # past argument
+ ;;
+ middle)
+ local_gzip_path="$local_gzip_path/middle"
+ mkdir -p $local_gzip_path
+ oss_prefix_url="$oss_prefix_url/middle/"
+ dltu
+ shift # past argument
+ ;;
+ cmii)
+ local_gzip_path="$local_gzip_path/xauas22"
+ mkdir -p $local_gzip_path
+ oss_prefix_url="$oss_prefix_url/xauas22/"
+ dltu
+ shift # past argument
+ ;;
+ *)
+ # unknown option
+ print_red "bad arguments"
+ ;;
+ esac
+ done
+
+}
+
+dltu() {
+ print_green "download all image name list and gzip file list!"
+ cd $local_gzip_path || exit
+
+    rm -f -- "$all_image_list_txt"
+    rm -f -- "$gzip_image_list_txt"
+
+ wget "$oss_prefix_url$all_image_list_txt"
+ wget "$oss_prefix_url$gzip_image_list_txt"
+
+ docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
+ echo ""
+ while IFS= read -r i; do
+ [ -z "${i}" ] && continue
+ echo "download gzip file =>: $oss_prefix_url${i}"
+ if wget "$oss_prefix_url${i}" >/dev/null 2>&1; then
+ echo "Gzip file download success : ${i}"
+ image_full_name=$(docker load -i ${i} | head -n1 |awk -F': ' '{print $2}')
+
+ app_name=$(echo "$image_full_name" | sed 's|.*/||g')
+ echo "extract short name is $app_name"
+
+ if echo $image_full_name | grep -q "rancher"
+ then
+ print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
+ docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
+ docker push $DockerRegisterDomain/rancher/$app_name
+ else
+ print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
+ docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
+ docker push $DockerRegisterDomain/cmii/$app_name
+ fi
+
+ else
+ print_red "Gzip file download FAILED : ${i}"
+ fi
+ echo "-------------------------------------------------"
+ done <"${gzip_image_list_txt}"
+ shift
+
+}
+
+Load_Tag_Upload(){
+ print_green "[LTU] - start to load image from offline !"
+
+ while [[ $# -gt 0 ]]; do
+ case "$1" in
+ rke)
+ # print_green "download rke "
+ local_gzip_path="$local_gzip_path/rke13014"
+ mkdir -p ${local_gzip_path}
+ oss_prefix_url="$oss_prefix_url/rke13014/"
+ ltu
+ shift # past argument
+ ;;
+ middle)
+ local_gzip_path="$local_gzip_path/middle"
+ mkdir -p $local_gzip_path
+ oss_prefix_url="$oss_prefix_url/middle/"
+ ltu
+ shift # past argument
+ ;;
+ cmii)
+ local_gzip_path="$local_gzip_path/cmii"
+ mkdir -p $local_gzip_path
+ oss_prefix_url="$oss_prefix_url/cmii/"
+ ltu
+ shift # past argument
+ ;;
+ *)
+ # unknown option
+ print_red "bad arguments"
+ ;;
+ esac
+ done
+
+}
+
+ltu(){
+ all_file_list=$(find $local_gzip_path -type f -name "*.tar.gz")
+
+ for file in $all_file_list; do
+ echo "offline gzip file is => : $file"
+ image_full_name=$(docker load -i ${file} | head -n1 |awk -F': ' '{print $2}')
+
+ docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
+
+ app_name=$(echo "$image_full_name" | sed 's|.*/||g')
+ echo "extract short name is $app_name"
+
+ if echo $image_full_name | grep -q "rancher"
+ then
+ print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
+ docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
+ docker push $DockerRegisterDomain/rancher/$app_name
+ else
+ print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
+ docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
+ docker push $DockerRegisterDomain/cmii/$app_name
+ fi
+ done
+}
+
+
+test(){ # NOTE(review): shadows the shell builtin 'test'; consider renaming (e.g. self_test)
+ app_name=$(echo "nginx:latest" | sed 's|.*/||g')
+ echo "extract short name is $app_name"
+}
+
+# test
+Download_Load_Tag_Upload "rke"
+
+# Load_Tag_Upload "cmii"
\ No newline at end of file
diff --git a/72-202602-绵阳飞服/cmii-update.sh b/72-202602-绵阳飞服/cmii-update.sh
new file mode 100644
index 0000000..41d6c5f
--- /dev/null
+++ b/72-202602-绵阳飞服/cmii-update.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+harbor_host=192.168.1.4:8033
+namespace=sc-my-uav-260202
+app_name=""
+new_tag=""
+
+download_from_oss() {
+ if [ "$1" == "" ]; then
+ echo "no zip file in error!"
+ exit 233
+ fi
+
+ echo "start to download => $1"
+ wget "https://oss.demo.uavcmlc.com/cmlc-installation/tmp/$1"
+
+ echo ""
+ echo ""
+}
+
+upload_image_to_harbor(){
+ if [ "$app_name" == "" ]; then
+ echo "app name null exit!"
+ exit 233
+ fi
+
+ if ! docker load < "$1"; then
+ echo "docker load error !"
+ fi
+ docker tag "harbor.cdcyy.com.cn/cmii/$app_name:$new_tag" "$harbor_host/cmii/$app_name:$new_tag"
+ echo ""
+ echo ""
+ echo "upload_image_to_harbor - start to push to => $harbor_host/cmii/$app_name:$new_tag"
+ docker login -u admin -p V2ryStr@ngPss $harbor_host
+ docker push "$harbor_host/cmii/$app_name:$new_tag"
+ echo ""
+ echo ""
+
+}
+
+parse_args(){
+ if [ "$1" == "" ]; then
+ echo "no zip file in error!"
+ exit 233
+ fi
+ local image_name="$1"
+
+ # cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
+ app_name=$(echo $image_name | cut -d "=" -f1)
+ new_tag=$(echo $image_name | cut -d "=" -f2)
+}
+
+update_image_tag(){
+ if [ "$new_tag" == "" ]; then
+ echo "new tag error!"
+ exit 233
+ fi
+
+ local image_prefix=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)
+
+ echo "image grep is => ${image_prefix}"
+
+ echo "start to update ${namespace} ${app_name} to ${new_tag} !"
+ echo ""
+ kubectl -n ${namespace} patch deployment "${app_name}" -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${harbor_host}/cmii/$app_name:${new_tag}\"}]}}}}"
+ echo ""
+ echo "start to wait for 3 seconds!"
+ sleep 3
+ local image_new=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}')
+ echo ""
+ echo "new image are => $image_new"
+ echo ""
+}
+
+main(){
+ parse_args "$1"
+ download_from_oss "$1"
+ upload_image_to_harbor "$1"
+ update_image_tag
+}
+
+main "$@"
\ No newline at end of file
diff --git a/72-202602-绵阳飞服/k8s-admin-token.txt b/72-202602-绵阳飞服/k8s-admin-token.txt
new file mode 100644
index 0000000..b3bd832
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-admin-token.txt
@@ -0,0 +1 @@
+eyJhbGciOiJSUzI1NiIsImtpZCI6ImVldHZIMnEyWjJsTDV1TWNPNTZmZjF2d1RxeDgzdDNidHlWSXlIMGNaV3cifQ.eyJhdWQiOlsidW5rbm93biJdLCJleHAiOjE4NjQ3MTAxODAsImlhdCI6MTc3MDEwMjE4MCwiaXNzIjoicmtlIiwianRpIjoiY2Q2MTJmNzItOWY1NS00MGFiLTg3MjMtZGJjNTc3MmVhNTIyIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiOTcwOTlkNTctOWNiMS00YjcxLTkyNTMtOGI1OTUyNWJmZDBkIn19LCJuYmYiOjE3NzAxMDIxODAsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.pL6lP3HUh9RMfnA0JBha-GnWEON9nnltnLaNsamr1AIgUReN5hhl-IsD3rbhKUcHvm9sCy9cFkQxVbt81CapGN0zxgfNvlVzxzCdQZpd1vtmGp70FSaT7wQoBC7pAWSHU0FUEltMUKWGSBvy0ZtDHEvRilk4Ie4MVN89hCPNOEiZaIC2QagSl7oBd3ppJSnHAMB7eM3pJDP2OhKNWBddNqC0YuHdbk6JHTkfmF2Xe3CYLTYfK6A1GCfVXGUApHgiRB0Dq1BT9M7wcIb4ZOlo9kzvdbISDSyyMiwTZQzR-_VyifyBXCilsYGYb8TnUXUqOVCmuxaGzUJzXMQsPEfuGQ
\ No newline at end of file
diff --git a/72-202602-绵阳飞服/k8s-app/fly-center.yaml b/72-202602-绵阳飞服/k8s-app/fly-center.yaml
new file mode 100644
index 0000000..04783af
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-app/fly-center.yaml
@@ -0,0 +1,215 @@
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: cmii-fly-center
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/app-version: 6.0.0
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: cmii-fly-center
+ cmii.type: backend
+ octopus/control: backend-app-1.0.0
+
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: cmii-fly-center
+ cmii.type: backend
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ cmii.app: cmii-fly-center
+ cmii.type: backend
+ spec:
+ volumes:
+ - name: application-k8s
+ configMap:
+ name: cmii-fly-center-cm
+ items:
+ - key: application-k8s.yml
+ path: application-k8s.yml
+ defaultMode: 420
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+ containers:
+ - name: cmii-fly-center
+          image: 192.168.1.4:8033/cmii/cmii-fly-center:2.0.0-012601
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ env:
+ - name: K8S_NAMESPACE
+              value: sc-my-uav-260202
+ - name: APPLICATION_NAME
+ value: cmii-fly-center
+ - name: CUST_JAVA_OPTS
+ value: '-Xms2000m -Xmx4500m -Dlog4j2.formatMsgNoLookups=true'
+ - name: NACOS_REGISTRY
+ value: helm-nacos:8848
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: '8080'
+ - name: BIZ_CONFIG_GROUP
+ value: 5.7.0
+ - name: SYS_CONFIG_GROUP
+ value: 5.7.0
+ - name: IMAGE_VERSION
+ value: 5.7.0
+ - name: NACOS_USERNAME
+ value: developer
+ - name: NACOS_PASSWORD
+ value: Deve@9128201
+ - name: SPRING_PROFILES_ACTIVE
+ value: k8s,db,cache,message
+ resources:
+ limits:
+ cpu: '4'
+ memory: 6Gi
+ requests:
+ cpu: '4'
+ memory: 2Gi
+ volumeMounts:
+ - name: application-k8s
+ mountPath: /cmii/config/application-k8s.yml
+ subPath: application-k8s.yml
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ subPath: uavcloud-devflight/cmii-fly-center
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - mianyang
+ schedulerName: default-scheduler
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ maxSurge: 25%
+ revisionHistoryLimit: 10
+ progressDeadlineSeconds: 600
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: cmii-fly-center
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/app-version: 6.0.0
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: cmii-fly-center
+ cmii.type: backend
+ octopus/control: backend-app-1.0.0
+
+spec:
+ ports:
+ - name: backend-tcp
+ protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ cmii.app: cmii-fly-center
+ cmii.type: backend
+ type: ClusterIP
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: cmii-fly-center-cm
+ namespace: sc-my-uav-260202
+data:
+ application-k8s.yml: |
+ center:
+ ####################下面部分为中间件对应配置,需要确认!!!#######################
+ ############使用k8s部署的中间件可以直接用k8s里面的服务名#################
+ db:
+ ip: helm-mysql
+ port: 3306
+ username: k8s_admin
+ password: fP#UaH6qQ3)8
+ rabbitmq:
+ ip: helm-rabbitmq
+ port: 5672
+ username: admin
+ password: nYcRN91r._hj
+ redis:
+ ip: helm-redis-master
+ port: 6379
+ password: Mcache@4522
+ mqtt:
+ BASIC:
+ ip: helm-emqxs
+ port: 1883 # mqtt内部1883端口
+ username: cmlc
+ password: odD8#Ve7.B
+ DRC:
+ ip: 36.133.66.183 # 设备连接的mqtt的公网IP
+ port: 31883 # 1883映射的公网端口
+ username: cmlc
+ password: odD8#Ve7.B
+ influxdb:
+ ip: helm-influxdb #influxdb宿主机的内部ip
+ port: 8086 #influxdb宿主机的端口
+ token: YunnHJASAAdj23rasQAWd621erGAS82kaqj
+ org: cmii
+ bucket: cmii
+ minio:
+ ip: helm-minio # minio服务的宿主机ip
+ port: 39000
+ access-key: cmii # minio的访问key
+ secret-key: B#923fC7mk # minio访问secret
+ publicEndpoint: http://36.133.66.183:39000
+ shareEndpoint: http://36.133.66.183:8088/center
+ hub:
+ appKey: Zhdjk*72uU^2xz@s
diff --git a/72-202602-绵阳飞服/k8s-app/lite.yaml b/72-202602-绵阳飞服/k8s-app/lite.yaml
new file mode 100644
index 0000000..f6f7fc3
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-app/lite.yaml
@@ -0,0 +1,154 @@
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: cmii-uav-platform-lite
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/app-version: 6.2.0
+ cmii.app: cmii-uav-platform-lite
+ cmii.type: frontend
+ octopus.lite: frontend-app-wdd
+ annotations:
+ deployment.kubernetes.io/revision: '8'
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: cmii-uav-platform-lite
+ cmii.type: frontend
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ cmii.app: cmii-uav-platform-lite
+ cmii.type: frontend
+ spec:
+ volumes:
+ - name: nginx-conf
+ configMap:
+ name: nginx-cm
+ items:
+ - key: nginx.conf
+ path: nginx.conf
+ defaultMode: 420
+ - name: tenant-prefix
+ configMap:
+ name: tenant-prefix-lite
+ items:
+ - key: ingress-config.js
+ path: ingress-config.js
+ defaultMode: 420
+ containers:
+ - name: cmii-uav-platform-lite
+ image: 192.168.1.4:8033/cmii/cmii-uav-platform-lite:2.0.0-2026012703-noicp
+ ports:
+ - name: platform-9528
+ containerPort: 9528
+ protocol: TCP
+ env:
+ - name: K8S_NAMESPACE
+              value: sc-my-uav-260202
+ - name: APPLICATION_NAME
+ value: cmii-uav-platform-lite
+ resources:
+ limits:
+ cpu: '1'
+ memory: 1Gi
+ requests:
+ cpu: 50m
+ memory: 50Mi
+ volumeMounts:
+ - name: nginx-conf
+ mountPath: /etc/nginx/conf.d/nginx.conf
+ subPath: nginx.conf
+ - name: tenant-prefix
+ mountPath: /home/cmii-platform/dist/ingress-config.js
+ subPath: ingress-config.js
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ schedulerName: default-scheduler
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ maxSurge: 25%
+ revisionHistoryLimit: 10
+ progressDeadlineSeconds: 600
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: cmii-uav-platform-lite
+ namespace: sc-my-uav-260202
+
+ labels:
+ app.kubernetes.io/version: 6.2.0
+ cmii.app: cmii-uav-platform-lite
+ cmii.type: frontend
+ octopus.control: frontend-app-wdd
+
+spec:
+ ports:
+ - name: web-svc-port
+ protocol: TCP
+ port: 9528
+ targetPort: 9528
+ selector:
+ cmii.app: cmii-uav-platform-lite
+ cmii.type: frontend
+ type: ClusterIP
+ sessionAffinity: None
+status:
+ loadBalancer: {}
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-lite
+ namespace: sc-my-uav-260202
+
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "lite",
+ AppClientId: "",
+ Headers: {
+ ORG_ID: 'pago',
+ PROJECT_ID: 'prgn'
+ },
+ TdtToken: "XXXX"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: nginx-cm
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: frontend
+data:
+ nginx.conf: |
+ server {
+ listen 9528;
+ server_name localhost;
+ gzip on;
+
+ location / {
+ root /home/cmii-platform/dist;
+ index index.html index.htm;
+ }
+
+ error_page 500 502 503 504 /50x.html;
+ location = /50x.html {
+ root html;
+ }
+ }
diff --git a/72-202602-绵阳飞服/k8s-app/sky-coverage.yaml b/72-202602-绵阳飞服/k8s-app/sky-coverage.yaml
new file mode 100644
index 0000000..15e87da
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-app/sky-coverage.yaml
@@ -0,0 +1,247 @@
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: cmii-sky-converge
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/app-version: 6.0.0
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: cmii-sky-converge
+ cmii.type: backend
+ octopus/control: backend-app-1.0.0
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: cmii-sky-converge
+ cmii.type: backend
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ cmii.app: cmii-sky-converge
+ cmii.type: backend
+ spec:
+ volumes:
+ - name: application-k8s
+ configMap:
+ name: cmii-sky-converge-cm
+ items:
+ - key: application-k8s.yml
+ path: application-k8s.yml
+ - key: simAuth.license
+ path: simAuth.license
+ defaultMode: 420
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+ containers:
+ - name: cmii-sky-converge
+ image: 192.168.1.4:8033/cmii/cmii-sky-converge:2.0.0-012601
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ env:
+ - name: K8S_NAMESPACE
+              value: sc-my-uav-260202
+ - name: APPLICATION_NAME
+ value: cmii-sky-converge
+ - name: CUST_JAVA_OPTS
+ value: '-Xms2000m -Xmx4500m -Dlog4j2.formatMsgNoLookups=true'
+ - name: NACOS_REGISTRY
+ value: helm-nacos:8848
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: '8080'
+ - name: BIZ_CONFIG_GROUP
+ value: 5.7.0
+ - name: SYS_CONFIG_GROUP
+ value: 5.7.0
+ - name: IMAGE_VERSION
+ value: 5.7.0
+ - name: NACOS_USERNAME
+ value: developer
+ - name: NACOS_PASSWORD
+ value: Deve@9128201
+ - name: SPRING_PROFILES_ACTIVE
+ value: k8s,db,cache,message
+ resources:
+ limits:
+ cpu: '4'
+ memory: 6Gi
+ requests:
+ cpu: '4'
+ memory: 2Gi
+ volumeMounts:
+ - name: application-k8s
+ mountPath: /cmii/config/application-k8s.yml
+ subPath: application-k8s.yml
+ - name: application-k8s
+ mountPath: /cmii/config/simAuth.license
+ subPath: simAuth.license
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ subPath: uavcloud-devflight/cmii-sky-converge
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 30
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 30
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 30
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - mianyang
+ schedulerName: default-scheduler
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ maxSurge: 25%
+ revisionHistoryLimit: 10
+ progressDeadlineSeconds: 600
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: cmii-sky-converge
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/app-version: 6.0.0
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: cmii-sky-converge
+ cmii.type: backend
+ octopus/control: backend-app-1.0.0
+
+spec:
+ ports:
+ - name: backend-tcp
+ protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ cmii.app: cmii-sky-converge
+ cmii.type: backend
+ type: ClusterIP
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: cmii-sky-converge-cm
+ namespace: sc-my-uav-260202
+data:
+ application-k8s.yml: |
+ converge:
+ ####################下面部分为中间件对应配置,需要确认!!!#######################
+ ############使用k8s部署的中间件可以直接用k8s里面的服务名#################
+ db:
+ ip: helm-mysql
+ port: 3306
+ username: k8s_admin
+ password: fP#UaH6qQ3)8
+ mqtt:
+ ip: helm-emqxs
+ port: 1883 # mqtt内部1883端口
+ username: cmlc
+ password: odD8#Ve7.B
+ rabbitmq:
+ ip: helm-rabbitmq
+ port: 5672
+ username: admin
+ password: nYcRN91r._hj
+ redis:
+ ip: helm-redis-master
+ port: 6379
+ password: Mcache@4522
+ influxdb:
+ ip: helm-influxdb #influxdb宿主机的内部ip
+ port: 8086 #influxdb宿主机的端口,如果docker compose文件没有改动则默认不变
+ token: YunnHJASAAdj23rasQAWd621erGAS82kaqj
+ org: cmii
+ bucket: cmii
+ minio:
+ ip: helm-minio # minio服务的宿主机ip
+ port: 39000
+ access-key: cmii # minio的访问key
+ secret-key: B#923fC7mk # minio访问secret
+
+ #######################下面部分是业务服务需要的配置,需要确认!!!#######################
+ center:
+ address: http://cmii-fly-center:8080 # cmii-fly-center服务部署的容器宿主机ip地址和暴露的端口
+ stream:
+ endpoint: http://36.133.66.183:8088 #平台地址端口
+ buckets:
+ live-srs-hls: ilm-detect
+ storage:
+ endpoint: http://36.133.66.183:8088/converge # cmii-sky-converge服务的公网请求地址,需要匹配到all-gateways-ingress里面converge服务的根路径
+ live:
+ merge:
+ tmp: /tmp/ffmpeg/
+ expired: 10
+ sms:
+ mas:
+ enable: false #内网部署改为false
+ host: http://XXX:XXX/sms/tmpsubmit
+ ecName: XXX科技有限公司
+ apId: notice
+ secretKey: notice@123
+ sign: ynYl2Vpl7
+ templateId: e4dc71ddd5c24d25b24daa01e969e24
+ expire: 3
+ limit:
+ minute: 5
+ hour: 15
+ day: 30
+ sim:
+ # true = 启用, false = 不启用
+ enable: false
+ ###固定k8s里面挂载路径
+ licensePath: /cmii/config/simAuth.license
+ # 测试环境:https://ptest.cmccsim.com:9090, 生产环境:https://certplat.cmccsim.com
+ host: https://ptest.cmccsim.com:9090
+ callbackUrl: http://183.220.196.116:8088/converge
+ simAuth.license: >
+ BOOedo/TVLbYLdKyGkFYEAljoncjd2+mKkwARpNkb0Q8D0QaZbOnCjJdMj0kUtHVRJ03CYujyVJZ8Xc1JvBTujSFgBvNwXWJN2E35TZYGUYx4uZW7WZJ9ajp3pi9Q4V9JLA4qdyd/Zaz0/T+mqaXzW0l18jA9VL25fB0tkzQYpySql76V9QAowpuVcklItcNZ8YWwK4lbPjaygBhZVNqdhbJQwqLG7io2X0QV11T5yhbu8SXCag0hoX6s93IBz0k4Aze2TZvpJ25o/NuMptWKviddrVNpVAIwT/L9kLNVkBT8T0xysX6Ku+9aLKUlLrGw4lhAHM5iHp82jduw7L9jc878ZZgOoUALLaw9axnVdnf3XfhZ75/uhx4mZ+JnNS2aNH18mVR53CGT3jxY0y1RA64e2zhMhFr/KNxVGIuZl/iAr1EGI85QWrnYGsLNbilCFlZyDzcH8tK4hDvmMtUe1xCEUF6oO9nwr+YDHGBSM1ifXLJZrvwuDI7Zim+h6pUqctWhtf6eyfyF17iBrzzt6lmSjkQtZ1kRVUxRni68/FPH9YJBKQhJItAk2h1OaUBB1Lt5vfu8OYi5S+onTmesvlIuUk7USBIFbt4kVhUpgGtV+WyddcjH6BJo3NPqCYcObR4KeLmQ/bHmN/xyVT3HMed8VhiVv0U8EuTINJxmXh+nDVmeDEUa4qYtPRGArSsGF2KGbnOOqwkyk1D/o81Zxb8Kklxn3I/CK1EM63HZLY4hGm52oRsNDjbJPFFFUdTqyQw7igHdwJYJbgxqycCAh1f8zioVVziOXwHxV85poIpVG7pP0LWrYttW1e2WdrSI4WUO1X4krfPu+7WxYHj4Cs4aTflYM9F+KVqbw6bVlg5PIPRiIy6eMRqzvl53y9eesd7eqUNgRnM13PmRDJPe6sw5BnaPn1eHBk7Mh+CAsdRnq8V0t9NkRK2aNfJFNo/PPjahDlw9DHMnJW3QGgZNR3LqFKQxDQIpR7xwgsYX5CmZo3gaBHbTx1EozCagco1tGHrRaDlJNjAYKjnus0huujI0dh+w/ybkWoN4jPQiMWx5O/oem62ga5NbHd5wS/A5e9UKfNZef1NYJyiWRYNINXr3lUl0835rb38q6+5tBKZnrJq1GZ8n2IEuw8L1YcbvtuSDBlHYGSDQ6yD5sL/qv73sXjL2jwtu+QllQt6jhFw5VUKIFRhCjuTeLbrzmcOO2TwCVZb89QPW5rGNA5sO99qormwqNkwXzsKXNx6r9B2rQ6WUdP05r1ti0YAShBdfC1CPhpt2yuKIFO3eh8J6fWjHygBX2kYq+zdb4w39d77gBayQX5lIw4MY0Dcqbyw/MvqcnXj47EUmQ+xIxkaL76C4nbN7GuMQs1rpJ4eox9qqyBeKbVGo/7/aqP6vMXl0BsPWLV5Z2jCy0HzgyBMy4mU1Q==
\ No newline at end of file
diff --git a/72-202602-绵阳飞服/k8s-deploy/helm-minio.yaml b/72-202602-绵阳飞服/k8s-deploy/helm-minio.yaml
new file mode 100644
index 0000000..28ba72c
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/helm-minio.yaml
@@ -0,0 +1,79 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ namespace: sc-my-uav-260202
+ name: helm-minio
+spec:
+ serviceName: helm-minio
+ replicas: 1
+ selector:
+ matchLabels:
+ app: helm-minio
+ template:
+ metadata:
+ labels:
+ app: helm-minio
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: minio-deploy
+ operator: In
+ values:
+ - "true"
+ containers:
+ - name: minio
+ image: 192.168.1.4:8033/cmii/minio:RELEASE.2023-06-02T23-17-26Z
+ command: ["/bin/sh", "-c"]
+ args:
+ - minio server /data --console-address ":9001"
+ ports:
+ - containerPort: 9000
+ name: api
+ - containerPort: 9001
+ name: console
+ env:
+ - name: MINIO_ACCESS_KEY
+ value: "cmii"
+ - name: MINIO_SECRET_KEY
+ value: "B#923fC7mk"
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ volumes:
+ - name: data
+# persistentVolumeClaim:
+# claimName: helm-minio
+ hostPath:
+ path: /var/lib/docker/minio-pv/
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-minio
+ namespace: sc-my-uav-260202
+spec:
+ selector:
+ app: helm-minio
+ ports:
+ - name: api
+ port: 9000
+ targetPort: 9000
+ nodePort: 39000
+ - name: console
+ port: 9001
+ targetPort: 9001
+ nodePort: 39001
+ type: NodePort
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-backend.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-backend.yaml
new file mode 100644
index 0000000..4347aff
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-backend.yaml
@@ -0,0 +1,350 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-fly-center
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: 2.0
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - mianyang
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-fly-center
+ image: 192.168.1.4:8033/cmii/cmii-fly-center:2.0.0-0126
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: sc-my-uav-260202
+ - name: APPLICATION_NAME
+ value: cmii-fly-center
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: 2.0
+ - name: SYS_CONFIG_GROUP
+ value: 2.0
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.1.4:8033/cmii/cmii-fly-center:2.0.0-0126
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: sc-my-uav-260202/cmii-fly-center
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-fly-center
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: 2.0
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-sky-converge
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: 2.0
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - mianyang
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-sky-converge
+ image: 192.168.1.4:8033/cmii/cmii-sky-converge:2.0.0-012601
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: sc-my-uav-260202
+ - name: APPLICATION_NAME
+ value: cmii-sky-converge
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: 2.0
+ - name: SYS_CONFIG_GROUP
+ value: 2.0
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.1.4:8033/cmii/cmii-sky-converge:2.0.0-012601
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: sc-my-uav-260202/cmii-sky-converge
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-sky-converge
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: 2.0
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-configmap.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-configmap.yaml
new file mode 100644
index 0000000..12a8ee3
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-configmap.yaml
@@ -0,0 +1,686 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-oms
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "oms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-security
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "security",
+ AppClientId: "APP_JUSEMc7afyWXxvE7"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-seniclive
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "seniclive",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qinghaitourism
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "qinghaitourism",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-visualization
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "visualization",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uas
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "uas",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dispatchh5
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "dispatchh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hyper
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "hyper",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uavmsmanager
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "uavmsmanager",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-awareness
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "awareness",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-lite
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "lite",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervision
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "supervision",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-emergency
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "emergency",
+ AppClientId: "APP_aGsTAY1uMZrpKdfk"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-multiterminal
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "multiterminal",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mws
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "mws",
+ AppClientId: "APP_uKniXPELlRERBBwK"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-securityh5
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "securityh5",
+ AppClientId: "APP_N3ImO0Ubfu9peRHD"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-traffic
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "traffic",
+ AppClientId: "APP_Jc8i2wOQ1t73QEJS"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smauth
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "smauth",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mianyangbackend
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "mianyangbackend",
+ AppClientId: "empty"
+ }
+---
+# NOTE(review): ApplicationShortName is empty here, unlike every other tenant
+# entry in this file — presumably the pangu app is served at the root path;
+# confirm this is intentional and not a missing value.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: tenant-prefix-pangu
+  namespace: sc-my-uav-260202
+data:
+  ingress-config.js: |-
+    var __GlobalIngressConfig = {
+      TenantEnvironment: "260202",
+      CloudHOST: "36.133.66.183:8088",
+      ApplicationShortName: "",
+      AppClientId: "empty"
+    }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-media
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "media",
+ AppClientId: "APP_4AU8lbifESQO4FD6"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-share
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "share",
+ AppClientId: "APP_4lVSVI0ZGxTssir8"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smsecret
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "smsecret",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-flight-control
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "flight-control",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-ai-brain
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "ai-brain",
+ AppClientId: "APP_rafnuCAmBESIVYMH"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-cmsportal
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "cmsportal",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-jiangsuwenlv
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "jiangsuwenlv",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uasms
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "uasms",
+ AppClientId: "empty"
+ }
+---
+# NOTE(review): exact duplicate of the tenant-prefix-uasms ConfigMap defined
+# immediately above — the second copy is redundant (the last applied document
+# silently wins) and one of the two should be removed.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: tenant-prefix-uasms
+  namespace: sc-my-uav-260202
+data:
+  ingress-config.js: |-
+    var __GlobalIngressConfig = {
+      TenantEnvironment: "260202",
+      CloudHOST: "36.133.66.183:8088",
+      ApplicationShortName: "uasms",
+      AppClientId: "empty"
+    }
+---
+# NOTE(review): exact duplicate of the tenant-prefix-uas ConfigMap defined
+# earlier in this file — redundant on apply (the last copy wins); one of the
+# two definitions should be removed.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: tenant-prefix-uas
+  namespace: sc-my-uav-260202
+data:
+  ingress-config.js: |-
+    var __GlobalIngressConfig = {
+      TenantEnvironment: "260202",
+      CloudHOST: "36.133.66.183:8088",
+      ApplicationShortName: "uas",
+      AppClientId: "empty"
+    }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-pilot2cloud
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "pilot2cloud",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-scanner
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "scanner",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-detection
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "detection",
+ AppClientId: "APP_FDHW2VLVDWPnnOCy"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-logistics
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "logistics",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-eventsh5
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "eventsh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dikongzhixingh5
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "dikongzhixingh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-renyike
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "renyike",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervisionh5
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "supervisionh5",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-armypeople
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "armypeople",
+ AppClientId: "APP_UIegse6Lfou9pO1U"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-base
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "base",
+ AppClientId: "APP_9LY41OaKSqk2btY0"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-open
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "open",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-threedsimulation
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "threedsimulation",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-blockchain
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "blockchain",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-classification
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "classification",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-secenter
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "secenter",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-splice
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "splice",
+ AppClientId: "APP_zE0M3sTRXrCIJS8Y"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qingdao
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "qingdao",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hljtt
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "hljtt",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-iot
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "iot",
+ AppClientId: "empty"
+ }
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-dashboard.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-dashboard.yaml
new file mode 100644
index 0000000..035b36a
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-dashboard.yaml
@@ -0,0 +1,315 @@
+---
+# ------------------- Dashboard Namespace ------------------- #
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: kubernetes-dashboard
+
+---
+# ------------------- Service Account ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Service (NodePort 39999) ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ type: NodePort
+ ports:
+ - port: 443
+ targetPort: 8443
+ nodePort: 39999
+ selector:
+ k8s-app: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Secrets ------------------- #
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-certs
+ namespace: kubernetes-dashboard
+type: Opaque
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-csrf
+ namespace: kubernetes-dashboard
+type: Opaque
+data:
+ csrf: ""
+
+---
+# ------------------- Dashboard Role (FIXED) ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+rules:
+ # [修复] 允许创建 Secrets,解决 panic 问题
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["create"]
+ # 允许对特定 Secrets 进行操作
+ - apiGroups: [""]
+ resources: ["secrets"]
+ resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
+ verbs: ["get", "update", "delete"]
+ # ConfigMaps 权限
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ resourceNames: ["kubernetes-dashboard-settings"]
+ verbs: ["get", "update"]
+ # Metrics 权限
+ - apiGroups: [""]
+ resources: ["services"]
+ resourceNames: ["heapster", "dashboard-metrics-scraper"]
+ verbs: ["proxy"]
+ - apiGroups: [""]
+ resources: ["services/proxy"]
+ resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
+ verbs: ["get"]
+
+---
+# ------------------- Dashboard RoleBinding ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: kubernetes-dashboard-minimal
+subjects:
+ - kind: ServiceAccount
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: kubernetes-dashboard
+ template:
+ metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ spec:
+ containers:
+ - name: kubernetes-dashboard
+ image: 192.168.1.4:8033/cmii/dashboard:v2.7.0
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8443
+ protocol: TCP
+ args:
+ - --auto-generate-certificates
+ - --namespace=kubernetes-dashboard
+ volumeMounts:
+ - name: kubernetes-dashboard-certs
+ mountPath: /certs
+ - mountPath: /tmp
+ name: tmp-volume
+ livenessProbe:
+ httpGet:
+ scheme: HTTPS
+ path: /
+ port: 8443
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ volumes:
+ - name: kubernetes-dashboard-certs
+ secret:
+ secretName: kubernetes-dashboard-certs
+ - name: tmp-volume
+ emptyDir: {}
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ------------------- Metrics Scraper Service ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ ports:
+ - port: 8000
+ targetPort: 8000
+ selector:
+ k8s-app: dashboard-metrics-scraper
+
+---
+# ------------------- Metrics Scraper Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: dashboard-metrics-scraper
+ template:
+ metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ annotations:
+ seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
+ spec:
+ containers:
+ - name: dashboard-metrics-scraper
+ image: 192.168.1.4:8033/cmii/metrics-scraper:v1.0.8
+ ports:
+ - containerPort: 8000
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /
+ port: 8000
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp-volume
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ volumes:
+ - name: tmp-volume
+ emptyDir: {}
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ==================================================================
+# 自定义用户配置部分 (ADMIN & READ-ONLY)
+# ==================================================================
+
+# ------------------- 1. Admin User (全部权限) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: admin-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: admin-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ name: admin-user
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- 2. Read-Only User (只读+看日志) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: read-only-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: dashboard-view-with-logs
+rules:
+  # Core API resources: read-only access cluster-wide.
+  # (Fix: "persistentvolumeclaims" was listed twice; de-duplicated.)
+  - apiGroups: [""]
+    resources: ["configmaps", "endpoints", "persistentvolumeclaims", "pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "nodes", "persistentvolumes", "namespaces"]
+    verbs: ["get", "list", "watch"]
+  # Pod logs: lets the read-only user view container logs in the dashboard.
+  - apiGroups: [""]
+    resources: ["pods/log"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["apps"]
+    resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["batch"]
+    resources: ["cronjobs", "jobs"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["networking.k8s.io"]
+    resources: ["ingresses", "networkpolicies"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["events.k8s.io"]
+    resources: ["events"]
+    verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: read-only-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: dashboard-view-with-logs
+subjects:
+ - kind: ServiceAccount
+ name: read-only-user
+ namespace: kubernetes-dashboard
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-emqx.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-emqx.yaml
new file mode 100644
index 0000000..d9b43da
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-emqx.yaml
@@ -0,0 +1,664 @@
+---
+# ============== EMQX middleware deployment manifest ==============
+# ============== Secret - 密码管理 ==============
+apiVersion: v1
+kind: Secret
+metadata:
+ name: emqx-credentials
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: "2.0"
+type: Opaque
+stringData:
+ # Dashboard管理员密码
+ dashboard-admin-password: "odD8#Ve7.B"
+ # MQTT用户密码
+ mqtt-admin-password: "odD8#Ve7.B"
+
+---
+# ============== ServiceAccount ==============
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+
+---
+# ============== Role - RBAC ==============
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+rules:
+ - apiGroups: [""]
+ resources:
+ - endpoints
+ - pods
+ verbs:
+ - get
+ - watch
+ - list
+
+---
+# ============== RoleBinding ==============
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+subjects:
+ - kind: ServiceAccount
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+roleRef:
+ kind: Role
+ name: helm-emqxs
+ apiGroup: rbac.authorization.k8s.io
+
+---
+# ============== ConfigMap - Bootstrap配置文件 ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-bootstrap-config
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+data:
+ # 主配置文件 - 覆盖默认配置
+ emqx.conf: |
+ # 节点配置
+ node {
+ name = "emqx@${POD_NAME}.helm-emqxs-headless.sc-my-uav-260202.svc.cluster.local"
+ cookie = "emqx-cluster-cookie-secret"
+ data_dir = "/opt/emqx/data"
+ }
+
+ # 集群配置
+ cluster {
+ name = emqxcl
+      # Discovery: "manual" is recommended for a single node; switch to "k8s" for multi-node clusters.
+ discovery_strategy = manual
+ k8s {
+ apiserver = "https://kubernetes.default.svc.cluster.local:443"
+ service_name = "helm-emqxs-headless"
+ # 这里可以改为 hostname
+ address_type = dns
+ namespace = "sc-my-uav-260202"
+ suffix = "svc.cluster.local"
+ }
+ }
+
+ # 日志配置
+ log {
+ console {
+ enable = true
+ level = info
+ }
+ file {
+ enable = true
+ level = warning
+ path = "/opt/emqx/log"
+ }
+ }
+
+    # Dashboard: the default "public" password below is rotated to the Secret value by the init-dashboard job.
+ dashboard {
+ listeners.http {
+ bind = "0.0.0.0:18083"
+ }
+ default_username = "admin"
+ default_password = "public"
+ }
+
+ # 监听器配置
+ listeners.tcp.default {
+ bind = "0.0.0.0:1883"
+ max_connections = 1024000
+ }
+
+ listeners.ws.default {
+ bind = "0.0.0.0:8083"
+ max_connections = 1024000
+ websocket.mqtt_path = "/mqtt"
+ }
+
+ listeners.ssl.default {
+ bind = "0.0.0.0:8883"
+ max_connections = 512000
+ }
+
+ # 认证配置 - 使用内置数据库
+ authentication = [
+ {
+ mechanism = password_based
+ backend = built_in_database
+ user_id_type = username
+ password_hash_algorithm {
+ name = sha256
+ salt_position = suffix
+ }
+ # Bootstrap文件路径 - 用于初始化用户
+ bootstrap_file = "/opt/emqx/data/bootstrap_users.json"
+ bootstrap_type = plain
+ }
+ ]
+
+ # 授权配置
+ authorization {
+ no_match = deny
+ deny_action = disconnect
+
+ sources = [
+ {
+ type = built_in_database
+ enable = true
+ }
+ ]
+ }
+
+ # MQTT协议配置
+ mqtt {
+ max_packet_size = "1MB"
+ max_clientid_len = 65535
+ max_topic_levels = 128
+ max_qos_allowed = 2
+ max_topic_alias = 65535
+ retain_available = true
+ wildcard_subscription = true
+ shared_subscription = true
+ }
+
+---
+# ============== ConfigMap - Users & ACL (strict JSON format) ==============
+# NOTE(review): MQTT credentials are stored here in plain text even though the
+# same passwords also live in the emqx-credentials Secret — consider sourcing
+# these bootstrap files from the Secret instead of a ConfigMap.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: emqx-bootstrap-users
+  namespace: sc-my-uav-260202
+data:
+  # Seed users consumed by the authentication bootstrap_file on first start.
+  bootstrap_users.json: |
+    [
+      { "user_id": "admin", "password": "odD8#Ve7.B", "is_superuser": true },
+      { "user_id": "cmlc", "password": "odD8#Ve7.B", "is_superuser": false }
+    ]
+
+  # Since jq is available, a standard JSON array is the least error-prone format.
+  bootstrap_acl.json: |
+    [
+      {
+        "username": "admin",
+        "rules": [
+          {"action": "all", "permission": "allow", "topic": "#"}
+        ]
+      },
+      {
+        "username": "cmlc",
+        "rules": [
+          {"action": "publish", "permission": "allow", "topic": "#"},
+          {"action": "subscribe", "permission": "allow", "topic": "#"}
+        ]
+      }
+    ]
+
+---
+# ============== ConfigMap - Dashboard/ACL init script ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: emqx-init-dashboard
+  namespace: sc-my-uav-260202
+data:
+  init-dashboard.sh: |
+    #!/bin/bash
+    set -e
+
+    DASHBOARD_USER="admin"
+    DASHBOARD_PASS="${DASHBOARD_ADMIN_PASSWORD}"
+    EMQX_API="http://localhost:18083/api/v5"
+    ACL_FILE="/bootstrap/bootstrap_acl.json"
+
+    # Helper: log line with an HH:MM:SS timestamp.
+    log() {
+      echo "[$(date +'%H:%M:%S')] $1"
+    }
+
+    log "======================================"
+    log "初始化 Dashboard 与 ACL"
+    log "======================================"
+
+    # ----------------------------------------------------------------
+    # 1. Wait for the EMQX management API to come up (60 x 5s = ~5 min).
+    # ----------------------------------------------------------------
+    log "[1/3] 等待 EMQX API 就绪..."
+    for i in $(seq 1 60); do
+      if curl -s -f -m 5 "${EMQX_API}/status" > /dev/null 2>&1; then
+        log "✓ EMQX API 已就绪"
+        break
+      fi
+      if [ $i -eq 60 ]; then
+        log "✗ EMQX API 启动超时"
+        exit 1
+      fi
+      sleep 5
+    done
+
+    # ----------------------------------------------------------------
+    # 2. Rotate the Dashboard password away from the factory default.
+    # ----------------------------------------------------------------
+    log "[2/3] 检查/更新 Dashboard 密码..."
+
+    # Try logging in with the default password; an empty token means the
+    # password was already changed (e.g. on a previous run).
+    LOGIN_RESP=$(curl -s -X POST "${EMQX_API}/login" \
+      -H 'Content-Type: application/json' \
+      -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"public\"}")
+
+    TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
+
+    if [ -n "$TOKEN" ]; then
+      log " 检测到默认密码,正在更新..."
+      curl -s -f -X POST "${EMQX_API}/users/${DASHBOARD_USER}/change_pwd" \
+        -H "Authorization: Bearer ${TOKEN}" \
+        -H 'Content-Type: application/json' \
+        -d "{\"old_pwd\":\"public\",\"new_pwd\":\"${DASHBOARD_PASS}\"}"
+      log " ✓ Dashboard 密码已更新"
+    else
+      log " ℹ 无法使用默认密码登录,跳过更新(可能已修改)"
+    fi
+
+    # ----------------------------------------------------------------
+    # 3. Import ACL rules from the bootstrap file.
+    #    (Fix: counters were [1/4]/[2/4]/[3/3]; normalized to /3, and this
+    #    section now uses the timestamped log() helper like the others.)
+    # ----------------------------------------------------------------
+    log "[3/3] 导入ACL规则..."
+
+    # Re-login with the (possibly just rotated) admin password.
+    LOGIN_RESP=$(curl -sS -X POST "${EMQX_API}/login" \
+      -H 'Content-Type: application/json' \
+      -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"${DASHBOARD_PASS}\"}")
+
+    TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
+
+    if [ -z "$TOKEN" ]; then
+      log " ✗ 无法获取Token,请检查密码设置"
+      exit 0
+    fi
+
+    if [ -f "$ACL_FILE" ]; then
+      log " 正在解析 ACL 文件: $ACL_FILE"
+
+      if ! jq -e . "$ACL_FILE" >/dev/null 2>&1; then
+        log " ✗ ACL 文件 JSON 格式错误,跳过处理"
+        exit 0
+      fi
+
+      # NOTE: the while loop runs in a pipeline subshell; its 'exit 1'
+      # fails the pipeline, which aborts the script under 'set -e'.
+      jq -c '.[]' "$ACL_FILE" | while read -r user_config; do
+        USERNAME=$(echo "$user_config" | jq -r '.username // empty')
+
+        # Both PUT and POST require username + rules in the request body.
+        REQ_BODY=$(echo "$user_config" | jq -c '{username: .username, rules: .rules}')
+
+        if [ -z "$USERNAME" ]; then
+          log " ✗ ACL 条目缺少 username,跳过"
+          continue
+        fi
+
+        log " 配置用户 ${USERNAME} 的ACL规则..."
+
+        # 1) Prefer PUT: idempotent overwrite of existing rules.
+        http_code=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
+          -X PUT "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}" \
+          -H "Authorization: Bearer ${TOKEN}" \
+          -H 'Content-Type: application/json' \
+          -d "$REQ_BODY")
+
+        if [ "$http_code" = "204" ]; then
+          log " ✓ PUT 更新成功"
+        elif [ "$http_code" = "404" ]; then
+          # 2) User has no rules yet -> create them via POST.
+          http_code2=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
+            -X POST "${EMQX_API}/authorization/sources/built_in_database/rules/users" \
+            -H "Authorization: Bearer ${TOKEN}" \
+            -H 'Content-Type: application/json' \
+            -d "$REQ_BODY")
+
+          if [ "$http_code2" = "204" ]; then
+            log " ✓ POST 创建成功"
+          else
+            log " ✗ POST 失败 (HTTP ${http_code2}):$(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
+            exit 1
+          fi
+        else
+          log " ✗ PUT 失败 (HTTP ${http_code}):$(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
+          exit 1
+        fi
+
+        # 3) Read back and verify the imported rules.
+        verify_code=$(curl -sS -o /tmp/emqx_acl_verify.json -w '%{http_code}' \
+          -H "Authorization: Bearer ${TOKEN}" \
+          "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}")
+
+        if [ "$verify_code" = "200" ]; then
+          log " ✓ 验证成功:$(cat /tmp/emqx_acl_verify.json | jq -c '.')"
+        else
+          log " ✗ 验证失败 (HTTP ${verify_code}):$(cat /tmp/emqx_acl_verify.json 2>/dev/null || true)"
+          exit 1
+        fi
+      done
+
+      log " ✓ ACL 规则导入完成"
+    else
+      log " ℹ 未找到 ACL 文件"
+    fi
+
+---
+# ============== StatefulSet ==============
+# EMQX 5.8.8 broker. An init container stages bootstrap_users.json into the
+# data PVC and fixes ownership; a long-running helper container then applies
+# dashboard credentials/ACLs via /scripts/init-dashboard.sh.
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: helm-emqxs
+  namespace: sc-my-uav-260202
+  labels:
+    cmii.type: middleware
+    cmii.app: helm-emqxs
+    cmii.emqx.architecture: cluster
+    helm.sh/chart: emqx-1.1.0
+    app.kubernetes.io/managed-by: octopus-control
+    app.kubernetes.io/version: "2.0"
+spec:
+  # Single replica even though labels advertise a 'cluster' architecture;
+  # scaling up would reuse the headless service below for peer discovery.
+  replicas: 1
+  serviceName: helm-emqxs-headless
+  podManagementPolicy: Parallel
+  updateStrategy:
+    type: RollingUpdate
+  selector:
+    matchLabels:
+      cmii.type: middleware
+      cmii.app: helm-emqxs
+      cmii.emqx.architecture: cluster
+
+  template:
+    metadata:
+      labels:
+        cmii.type: middleware
+        cmii.app: helm-emqxs
+        cmii.emqx.architecture: cluster
+        helm.sh/chart: emqx-1.1.0
+        app.kubernetes.io/managed-by: octopus-control
+        app.kubernetes.io/version: "2.0"
+    spec:
+      affinity:
+        # Pin to nodes labelled for the 'mianyang' environment.
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: uavcloud.env
+                    operator: In
+                    values:
+                      - mianyang
+        # Prefer spreading broker pods across hosts (relevant if scaled > 1).
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - weight: 100
+              podAffinityTerm:
+                labelSelector:
+                  matchExpressions:
+                    - key: cmii.app
+                      operator: In
+                      values:
+                        - helm-emqxs
+                topologyKey: kubernetes.io/hostname
+
+      imagePullSecrets:
+        - name: harborsecret
+
+      serviceAccountName: helm-emqxs
+
+      # Main containers run unprivileged; volume files are group-owned 1000.
+      securityContext:
+        fsGroup: 1000
+        runAsUser: 1000
+
+      # Init container: stage the bootstrap users file into the data volume.
+      initContainers:
+        - name: prepare-bootstrap
+          # tools image (registry selected per environment)
+          image: 192.168.1.4:8033/cmii/tools:1.0
+          imagePullPolicy: IfNotPresent
+          # =========================================================
+          # Permissions: must run as root so the chown below succeeds
+          # =========================================================
+          securityContext:
+            runAsUser: 0
+          command:
+            - /bin/sh
+            - -c
+            - |
+              echo "准备bootstrap文件..."
+
+              # 创建数据目录
+              mkdir -p /opt/emqx/data
+
+              # 复制bootstrap文件到数据目录
+              # 只在文件不存在时复制,避免覆盖已有数据
+              if [ ! -f /opt/emqx/data/bootstrap_users.json ]; then
+                cp /bootstrap-src/bootstrap_users.json /opt/emqx/data/
+                echo "✓ 已复制用户bootstrap文件"
+              else
+                echo "ℹ 用户bootstrap文件已存在,跳过"
+              fi
+
+              # 设置权限 (现在有root权限,可以成功)
+              chown -R 1000:1000 /opt/emqx/data
+
+              echo "✓ Bootstrap准备完成"
+          volumeMounts:
+            - name: emqx-data
+              mountPath: /opt/emqx/data
+            - name: bootstrap-users
+              mountPath: /bootstrap-src
+
+      containers:
+        # Main container - EMQX broker
+        - name: emqx
+          # emqx image (registry selected per environment)
+          image: 192.168.1.4:8033/cmii/emqx:5.8.8
+          imagePullPolicy: IfNotPresent
+
+          env:
+            # Pod metadata
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: EMQX_DATA_DIR
+              value: "/opt/emqx/data"
+
+          ports:
+            - name: mqtt
+              containerPort: 1883
+            - name: mqttssl
+              containerPort: 8883
+            - name: ws
+              containerPort: 8083
+            - name: dashboard
+              containerPort: 18083
+            # NOTE(review): presumably the Erlang/ekka clustering port — verify
+            # against the emqx.conf shipped in the bootstrap-config ConfigMap.
+            - name: ekka
+              containerPort: 4370
+
+          resources:
+            requests:
+              cpu: "500m"
+              memory: "512Mi"
+            limits:
+              cpu: "2000m"
+              memory: "2Gi"
+
+          # All probes hit the dashboard /status endpoint on 18083.
+          livenessProbe:
+            httpGet:
+              path: /status
+              port: 18083
+            initialDelaySeconds: 60
+            periodSeconds: 30
+            timeoutSeconds: 10
+            failureThreshold: 3
+
+          readinessProbe:
+            httpGet:
+              path: /status
+              port: 18083
+            initialDelaySeconds: 10
+            periodSeconds: 10
+            timeoutSeconds: 5
+            failureThreshold: 3
+
+          # Allows up to ~10s + 30*5s before liveness takes over.
+          startupProbe:
+            httpGet:
+              path: /status
+              port: 18083
+            initialDelaySeconds: 10
+            periodSeconds: 5
+            failureThreshold: 30
+
+          volumeMounts:
+            - name: emqx-data
+              mountPath: /opt/emqx/data
+            # Mount the single config file via subPath so the rest of
+            # /opt/emqx/etc from the image is not shadowed.
+            - name: bootstrap-config
+              mountPath: /opt/emqx/etc/emqx.conf
+              subPath: emqx.conf
+
+        # Helper container - initializes the dashboard password and ACLs.
+        # (Runs as an ordinary container, not a Kubernetes-native sidecar.)
+        - name: init-dashboard
+          # tools image (registry selected per environment)
+          image: 192.168.1.4:8033/cmii/tools:1.0
+          imagePullPolicy: IfNotPresent
+
+          # Waits a fixed 20s for EMQX to come up, runs the init script once,
+          # then idles forever so the pod stays Ready.
+          command:
+            - /bin/sh
+            - -c
+            - |
+              # 等待主容器启动
+              echo "等待EMQX启动..."
+              sleep 20
+
+              # 执行初始化
+              /bin/sh /scripts/init-dashboard.sh
+
+              # 保持运行
+              echo "初始化完成,进入守护模式..."
+              while true; do sleep 3600; done
+
+          env:
+            - name: DASHBOARD_ADMIN_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: emqx-credentials
+                  key: dashboard-admin-password
+
+          resources:
+            requests:
+              cpu: "100m"
+              memory: "64Mi"
+            limits:
+              cpu: "200m"
+              memory: "128Mi"
+
+          volumeMounts:
+            - name: init-script
+              mountPath: /scripts
+            - name: bootstrap-users
+              mountPath: /bootstrap
+
+      volumes:
+        - name: bootstrap-config
+          configMap:
+            name: emqx-bootstrap-config
+        - name: bootstrap-users
+          configMap:
+            name: emqx-bootstrap-users
+        - name: init-script
+          configMap:
+            name: emqx-init-dashboard
+            defaultMode: 0755
+        - name: emqx-data
+          persistentVolumeClaim:
+            claimName: helm-emqxs
+
+---
+# ============== Service - Headless ==============
+# Stable per-pod DNS for the StatefulSet (serviceName above).
+# publishNotReadyAddresses lets peers discover each other before Ready —
+# needed for broker bootstrap/cluster formation.
+apiVersion: v1
+kind: Service
+metadata:
+  name: helm-emqxs-headless
+  namespace: sc-my-uav-260202
+  labels:
+    cmii.type: middleware
+    cmii.app: helm-emqxs
+    cmii.emqx.architecture: cluster
+    helm.sh/chart: emqx-1.1.0
+    app.kubernetes.io/managed-by: octopus-control
+    app.kubernetes.io/version: "2.0"
+spec:
+  type: ClusterIP
+  clusterIP: None
+  publishNotReadyAddresses: true
+  selector:
+    cmii.type: middleware
+    cmii.app: helm-emqxs
+    cmii.emqx.architecture: cluster
+  ports:
+    - name: mqtt
+      port: 1883
+      targetPort: 1883
+    - name: mqttssl
+      port: 8883
+      targetPort: 8883
+    - name: ws
+      port: 8083
+      targetPort: 8083
+    - name: dashboard
+      port: 18083
+      targetPort: 18083
+    - name: ekka
+      port: 4370
+      targetPort: 4370
+
+---
+# ============== Service - NodePort ==============
+# External entry points: MQTT 31883, dashboard 38085, websocket 38083.
+apiVersion: v1
+kind: Service
+metadata:
+  name: helm-emqxs
+  namespace: sc-my-uav-260202
+  labels:
+    cmii.type: middleware
+    cmii.app: helm-emqxs
+    cmii.emqx.architecture: cluster
+    helm.sh/chart: emqx-1.1.0
+    app.kubernetes.io/managed-by: octopus-control
+    app.kubernetes.io/version: "2.0"
+spec:
+  type: NodePort
+  selector:
+    cmii.type: middleware
+    cmii.app: helm-emqxs
+    cmii.emqx.architecture: cluster
+  ports:
+    - name: mqtt
+      port: 1883
+      targetPort: 1883
+      nodePort: 31883
+    - name: dashboard
+      port: 18083
+      targetPort: 18083
+      nodePort: 38085
+    - name: ws
+      port: 8083
+      targetPort: 8083
+      nodePort: 38083
+    # NOTE(review): no explicit nodePort here, so Kubernetes assigns a random
+    # one — confirm whether TLS clients expect a fixed port (e.g. 38883).
+    - name: mqttssl
+      port: 8883
+      targetPort: 8883
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-frontend.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-frontend.yaml
new file mode 100644
index 0000000..b8f36d6
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-frontend.yaml
@@ -0,0 +1,114 @@
+---
+# Nginx server block for the lite frontend; mounted over
+# /etc/nginx/conf.d/nginx.conf via subPath by the Deployment below.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: nginx-cm
+  namespace: sc-my-uav-260202
+  labels:
+    cmii.type: frontend
+# NOTE(review): there is no 'try_files $uri $uri/ /index.html;' — deep links
+# will 404 unless the SPA uses hash routing. Confirm with the frontend build.
+data:
+  nginx.conf: |
+    server {
+      listen 9528;
+      server_name localhost;
+      gzip on;
+
+      location / {
+        root /home/cmii-platform/dist;
+        index index.html index.htm;
+      }
+
+      error_page 500 502 503 504 /50x.html;
+      location = /50x.html {
+        root html;
+      }
+    }
+---
+# Lite frontend: nginx serving a prebuilt SPA bundle on 9528.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cmii-uav-platform-lite
+  namespace: sc-my-uav-260202
+  labels:
+    cmii.type: frontend
+    cmii.app: cmii-uav-platform-lite
+    octopus.control: frontend-app-wdd
+    # FIX: label values must be strings — unquoted 2.0 parses as a YAML float
+    # and the API server rejects the object when decoding metadata.labels.
+    # (The companion Service already quotes the same value.)
+    app.kubernetes.io/app-version: "2.0"
+spec:
+  replicas: 1
+  strategy:
+    rollingUpdate:
+      maxUnavailable: 1
+  selector:
+    matchLabels:
+      cmii.type: frontend
+      cmii.app: cmii-uav-platform-lite
+  template:
+    metadata:
+      labels:
+        cmii.type: frontend
+        cmii.app: cmii-uav-platform-lite
+    spec:
+      imagePullSecrets:
+        - name: harborsecret
+      containers:
+        - name: cmii-uav-platform-lite
+          # NOTE(review): tag looks pinned yet pullPolicy is Always —
+          # presumably the tag gets re-pushed; confirm before changing.
+          image: 192.168.1.4:8033/cmii/cmii-uav-platform-lite:2.0.0-2026012703-noicp
+          imagePullPolicy: Always
+          env:
+            - name: K8S_NAMESPACE
+              value: sc-my-uav-260202
+            - name: APPLICATION_NAME
+              value: cmii-uav-platform-lite
+          ports:
+            - name: platform-9528
+              containerPort: 9528
+              protocol: TCP
+          resources:
+            limits:
+              cpu: "1"
+              memory: 1Gi
+            requests:
+              cpu: 50m
+              memory: 50Mi
+          volumeMounts:
+            # Override the default server block shipped in the image.
+            - name: nginx-conf
+              mountPath: /etc/nginx/conf.d/nginx.conf
+              subPath: nginx.conf
+            # Tenant-specific ingress prefix injected into the served bundle.
+            - name: tenant-prefix
+              subPath: ingress-config.js
+              mountPath: /home/cmii-platform/dist/ingress-config.js
+      volumes:
+        - name: nginx-conf
+          configMap:
+            name: nginx-cm
+            items:
+              - key: nginx.conf
+                path: nginx.conf
+        - name: tenant-prefix
+          configMap:
+            name: tenant-prefix-lite
+            items:
+              - key: ingress-config.js
+                path: ingress-config.js
+---
+# ClusterIP service fronting the lite frontend Deployment; exposed to the
+# outside world through the shared frontend Ingress, not directly.
+apiVersion: v1
+kind: Service
+metadata:
+  name: cmii-uav-platform-lite
+  namespace: sc-my-uav-260202
+  labels:
+    cmii.type: frontend
+    cmii.app: cmii-uav-platform-lite
+    octopus.control: frontend-app-wdd
+    app.kubernetes.io/version: "2.0"
+spec:
+  type: ClusterIP
+  selector:
+    cmii.type: frontend
+    cmii.app: cmii-uav-platform-lite
+  ports:
+    - name: web-svc-port
+      port: 9528
+      protocol: TCP
+      targetPort: 9528
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-ingress-13014.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-ingress-13014.yaml
new file mode 100644
index 0000000..88c7a75
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-ingress-13014.yaml
@@ -0,0 +1,995 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: frontend-applications-ingress
+ namespace: sc-my-uav-260202
+ labels:
+ type: frontend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/enable-cors: 'true'
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+spec:
+ rules:
+ - host: fake-domain.sc-my-uav-260202.io
+ http:
+ paths:
+ - path: /?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform
+ port:
+ number: 9528
+ - path: /supervision/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-suav-platform-supervision
+ port:
+ number: 9528
+ - path: /supervisionh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-suav-platform-supervisionh5
+ port:
+ number: 9528
+ - path: /pangu/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform
+ port:
+ number: 9528
+ - path: /ai-brain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-ai-brain
+ port:
+ number: 9528
+ - path: /armypeople/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-armypeople
+ port:
+ number: 9528
+ - path: /awareness/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-awareness
+ port:
+ number: 9528
+ - path: /base/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-base
+ port:
+ number: 9528
+ - path: /blockchain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-blockchain
+ port:
+ number: 9528
+ - path: /classification/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-classification
+ port:
+ number: 9528
+ - path: /cmsportal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-cms-portal
+ port:
+ number: 9528
+ - path: /detection/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-detection
+ port:
+ number: 9528
+ - path: /dikongzhixingh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-dikongzhixingh5
+ port:
+ number: 9528
+ - path: /dispatchh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-dispatchh5
+ port:
+ number: 9528
+ - path: /emergency/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-emergency-rescue
+ port:
+ number: 9528
+ - path: /eventsh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-eventsh5
+ port:
+ number: 9528
+ - path: /flight-control/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-flight-control
+ port:
+ number: 9528
+ - path: /hljtt/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-hljtt
+ port:
+ number: 9528
+ - path: /hyper/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-hyperspectral
+ port:
+ number: 9528
+ - path: /iot/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-iot-manager
+ port:
+ number: 9528
+ - path: /jiangsuwenlv/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-jiangsuwenlv
+ port:
+ number: 9528
+ - path: /logistics/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-logistics
+ port:
+ number: 9528
+ - path: /media/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-media
+ port:
+ number: 9528
+ - path: /mianyangbackend/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-mianyangbackend
+ port:
+ number: 9528
+ - path: /multiterminal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-multiterminal
+ port:
+ number: 9528
+ - path: /mws/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-mws
+ port:
+ number: 9528
+ - path: /oms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-oms
+ port:
+ number: 9528
+ - path: /open/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-open
+ port:
+ number: 9528
+ - path: /pilot2cloud/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-pilot2-to-cloud
+ port:
+ number: 9528
+ - path: /qingdao/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-qingdao
+ port:
+ number: 9528
+ - path: /qinghaitourism/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-qinghaitourism
+ port:
+ number: 9528
+ - path: /renyike/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-renyike
+ port:
+ number: 9528
+ - path: /scanner/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-scanner
+ port:
+ number: 9528
+ - path: /security/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-security
+ port:
+ number: 9528
+ - path: /securityh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-securityh5
+ port:
+ number: 9528
+ - path: /seniclive/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-seniclive
+ port:
+ number: 9528
+ - path: /share/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-share
+ port:
+ number: 9528
+ - path: /smauth/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-smauth
+ port:
+ number: 9528
+ - path: /smsecret/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-smsecret
+ port:
+ number: 9528
+ - path: /splice/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-splice
+ port:
+ number: 9528
+ - path: /threedsimulation/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-threedsimulation
+ port:
+ number: 9528
+ - path: /traffic/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-traffic
+ port:
+ number: 9528
+          - path: /uas/?(.*)
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: cmii-uav-platform-uas
+                port:
+                  number: 9528
+          # FIX: this entry previously repeated path /uas/?(.*) — a duplicate
+          # path under the same host can never match, so the 'uaskny' frontend
+          # was unreachable. Give it its own prefix mirroring the service
+          # name; confirm the intended public path with the frontend team.
+          - path: /uaskny/?(.*)
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: cmii-uav-platform-uaskny
+                port:
+                  number: 9528
+          - path: /uasms/?(.*)
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: cmii-uav-platform-uasms
+                port:
+                  number: 9528
+          # FIX: same duplication — /uasms/?(.*) appeared twice; the
+          # 'uasmskny' frontend gets its own prefix.
+          - path: /uasmskny/?(.*)
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: cmii-uav-platform-uasmskny
+                port:
+                  number: 9528
+ - path: /visualization/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-platform-visualization
+ port:
+ number: 9528
+ - path: /uavmsmanager/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uavms-platform-manager
+ port:
+ number: 9528
+ - path: /secenter/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uavms-platform-security-center
+ port:
+ number: 9528
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: backend-applications-ingress
+ namespace: sc-my-uav-260202
+ labels:
+ type: backend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.2
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/enable-cors: 'true'
+spec:
+ rules:
+ - host: cmii-admin-data.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-admin-data
+ port:
+ number: 8080
+ - host: cmii-admin-gateway.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-admin-gateway
+ port:
+ number: 8080
+ - host: cmii-admin-user.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-admin-user
+ port:
+ number: 8080
+ - host: cmii-app-release.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-app-release
+ port:
+ number: 8080
+ - host: cmii-open-gateway.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-open-gateway
+ port:
+ number: 8080
+ - host: cmii-sky-converge.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-sky-converge
+ port:
+ number: 8080
+ - host: cmii-suav-supervision.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-suav-supervision
+ port:
+ number: 8080
+ - host: cmii-uas-datahub.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uas-datahub
+ port:
+ number: 8080
+ - host: cmii-uas-gateway.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uas-gateway
+ port:
+ number: 8080
+ - host: cmii-uas-lifecycle.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uas-lifecycle
+ port:
+ number: 8080
+ - host: cmii-uav-advanced5g.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-advanced5g
+ port:
+ number: 8080
+ - host: cmii-uav-airspace.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-airspace
+ port:
+ number: 8080
+ - host: cmii-uav-alarm.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-alarm
+ port:
+ number: 8080
+ - host: cmii-uav-autowaypoint.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-autowaypoint
+ port:
+ number: 8080
+ - host: cmii-uav-brain.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-brain
+ port:
+ number: 8080
+ - host: cmii-uav-bridge.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-bridge
+ port:
+ number: 8080
+ - host: cmii-uav-cloud-live.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-cloud-live
+ port:
+ number: 8080
+ - host: cmii-uav-clusters.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-clusters
+ port:
+ number: 8080
+ - host: cmii-uav-cms.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-cms
+ port:
+ number: 8080
+ - host: cmii-uav-data-post-process.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-data-post-process
+ port:
+ number: 8080
+ - host: cmii-uav-depotautoreturn.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-depotautoreturn
+ port:
+ number: 8080
+ - host: cmii-uav-developer.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-developer
+ port:
+ number: 8080
+ - host: cmii-uav-device.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-device
+ port:
+ number: 8080
+ - host: cmii-uav-emergency.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-emergency
+ port:
+ number: 8080
+ - host: cmii-uav-fwdd.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-fwdd
+ port:
+ number: 8080
+ - host: cmii-uav-gateway.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-gateway
+ port:
+ number: 8080
+ - host: cmii-uav-gis-server.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-gis-server
+ port:
+ number: 8080
+ - host: cmii-uav-grid-datasource.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-grid-datasource
+ port:
+ number: 8080
+ - host: cmii-uav-grid-engine.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-grid-engine
+ port:
+ number: 8080
+ - host: cmii-uav-grid-manage.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-grid-manage
+ port:
+ number: 8080
+ - host: cmii-uav-industrial-portfolio.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-industrial-portfolio
+ port:
+ number: 8080
+ - host: cmii-uav-integration.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-integration
+ port:
+ number: 8080
+ - host: cmii-uav-iot-dispatcher.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-iot-dispatcher
+ port:
+ number: 8080
+ - host: cmii-uav-iot-manager.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-iot-manager
+ port:
+ number: 8080
+ - host: cmii-uav-kpi-monitor.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-kpi-monitor
+ port:
+ number: 8080
+ - host: cmii-uav-logger.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-logger
+ port:
+ number: 8080
+ - host: cmii-uav-material-warehouse.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-material-warehouse
+ port:
+ number: 8080
+ - host: cmii-uav-mission.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-mission
+ port:
+ number: 8080
+ - host: cmii-uav-mqtthandler.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-mqtthandler
+ port:
+ number: 8080
+ - host: cmii-uav-multilink.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-multilink
+ port:
+ number: 8080
+ - host: cmii-uav-notice.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-notice
+ port:
+ number: 8080
+ - host: cmii-uav-oauth.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-oauth
+ port:
+ number: 8080
+ - host: cmii-uav-process.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-process
+ port:
+ number: 8080
+ - host: cmii-uav-sec-awareness.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-sec-awareness
+ port:
+ number: 8080
+ - host: cmii-uav-security-trace.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-security-trace
+ port:
+ number: 8080
+ - host: cmii-uav-sense-adapter.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-sense-adapter
+ port:
+ number: 8080
+ - host: cmii-uav-surveillance.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-surveillance
+ port:
+ number: 8080
+ - host: cmii-uav-sync.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-sync
+ port:
+ number: 8080
+ - host: cmii-uav-tcp-server.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-tcp-server
+ port:
+ number: 8080
+ - host: cmii-uav-threedsimulation.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-threedsimulation
+ port:
+ number: 8080
+ - host: cmii-uav-tower.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-tower
+ port:
+ number: 8080
+ - host: cmii-uav-user.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-user
+ port:
+ number: 8080
+ - host: cmii-uav-watchdog.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-watchdog
+ port:
+ number: 8080
+ - host: cmii-uav-waypoint.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uav-waypoint
+ port:
+ number: 8080
+ - host: cmii-uavms-pyfusion.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uavms-pyfusion
+ port:
+ number: 8080
+ - host: cmii-uavms-security-center.uavcloud-sc-my-202602.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: cmii-uavms-security-center
+ port:
+ number: 8080
+---
+# API gateway routing: each backend gateway is exposed under a path prefix of
+# the shared fake domain; rewrite-target strips the prefix before proxying.
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: all-gateways-ingress
+  namespace: sc-my-uav-260202
+  labels:
+    type: api-gateway
+    octopus.control: all-ingress-config-1.1.0
+    app.kubernetes.io/managed-by: octopus-control
+    app.kubernetes.io/version: uas-2.2
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/enable-cors: 'true'
+    # Forward only the capture group after the matched prefix.
+    nginx.ingress.kubernetes.io/rewrite-target: /$1
+    # 1h timeouts — presumably for websocket/streaming traffic; confirm.
+    nginx.ingress.kubernetes.io/proxy-read-timeout: '3600'
+    nginx.ingress.kubernetes.io/proxy-send-timeout: '3600'
+spec:
+  rules:
+    - host: fake-domain.sc-my-uav-260202.io
+      http:
+        paths:
+          - path: /oms/api/?(.*)
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: cmii-admin-gateway
+                port:
+                  number: 8080
+          - path: /open/api/?(.*)
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: cmii-open-gateway
+                port:
+                  number: 8080
+          - path: /api/?(.*)
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: cmii-uav-gateway
+                port:
+                  number: 8080
+          - path: /uas/api/?(.*)
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: cmii-uas-gateway
+                port:
+                  number: 8080
+          - path: /converge/?(.*)
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: cmii-sky-converge
+                port:
+                  number: 8080
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-ingress.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-ingress.yaml
new file mode 100644
index 0000000..67541fe
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-ingress.yaml
@@ -0,0 +1,832 @@
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: frontend-applications-ingress
+ namespace: sc-my-uav-260202
+ labels:
+ type: frontend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: "2.0"
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ rewrite ^(/supervision)$ $1/ redirect;
+ rewrite ^(/supervisionh5)$ $1/ redirect;
+ rewrite ^(/pangu)$ $1/ redirect;
+ rewrite ^(/ai-brain)$ $1/ redirect;
+ rewrite ^(/armypeople)$ $1/ redirect;
+ rewrite ^(/awareness)$ $1/ redirect;
+ rewrite ^(/base)$ $1/ redirect;
+ rewrite ^(/blockchain)$ $1/ redirect;
+ rewrite ^(/classification)$ $1/ redirect;
+ rewrite ^(/cmsportal)$ $1/ redirect;
+ rewrite ^(/detection)$ $1/ redirect;
+ rewrite ^(/dikongzhixingh5)$ $1/ redirect;
+ rewrite ^(/dispatchh5)$ $1/ redirect;
+ rewrite ^(/emergency)$ $1/ redirect;
+ rewrite ^(/eventsh5)$ $1/ redirect;
+ rewrite ^(/flight-control)$ $1/ redirect;
+ rewrite ^(/hljtt)$ $1/ redirect;
+ rewrite ^(/hyper)$ $1/ redirect;
+ rewrite ^(/iot)$ $1/ redirect;
+ rewrite ^(/jiangsuwenlv)$ $1/ redirect;
+ rewrite ^(/lite)$ $1/ redirect;
+ rewrite ^(/logistics)$ $1/ redirect;
+ rewrite ^(/media)$ $1/ redirect;
+ rewrite ^(/mianyangbackend)$ $1/ redirect;
+ rewrite ^(/multiterminal)$ $1/ redirect;
+ rewrite ^(/mws)$ $1/ redirect;
+ rewrite ^(/oms)$ $1/ redirect;
+ rewrite ^(/open)$ $1/ redirect;
+ rewrite ^(/pilot2cloud)$ $1/ redirect;
+ rewrite ^(/qingdao)$ $1/ redirect;
+ rewrite ^(/qinghaitourism)$ $1/ redirect;
+ rewrite ^(/renyike)$ $1/ redirect;
+ rewrite ^(/scanner)$ $1/ redirect;
+ rewrite ^(/security)$ $1/ redirect;
+ rewrite ^(/securityh5)$ $1/ redirect;
+ rewrite ^(/seniclive)$ $1/ redirect;
+ rewrite ^(/share)$ $1/ redirect;
+ rewrite ^(/smauth)$ $1/ redirect;
+ rewrite ^(/smsecret)$ $1/ redirect;
+ rewrite ^(/splice)$ $1/ redirect;
+ rewrite ^(/threedsimulation)$ $1/ redirect;
+ rewrite ^(/traffic)$ $1/ redirect;
+ rewrite ^(/uas)$ $1/ redirect;
+      rewrite ^(/uaskny)$ $1/ redirect;
+ rewrite ^(/uasms)$ $1/ redirect;
+      rewrite ^(/uasmskny)$ $1/ redirect;
+ rewrite ^(/visualization)$ $1/ redirect;
+ rewrite ^(/uavmsmanager)$ $1/ redirect;
+ rewrite ^(/secenter)$ $1/ redirect;
+spec:
+ rules:
+ - host: fake-domain.sc-my-uav-260202.io
+ http:
+ paths:
+ - path: /?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform
+ servicePort: 9528
+ - path: /supervision/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-platform-supervision
+ servicePort: 9528
+ - path: /supervisionh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-platform-supervisionh5
+ servicePort: 9528
+ - path: /pangu/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform
+ servicePort: 9528
+ - path: /ai-brain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-ai-brain
+ servicePort: 9528
+ - path: /armypeople/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-armypeople
+ servicePort: 9528
+ - path: /awareness/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-awareness
+ servicePort: 9528
+ - path: /base/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-base
+ servicePort: 9528
+ - path: /blockchain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-blockchain
+ servicePort: 9528
+ - path: /classification/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-classification
+ servicePort: 9528
+ - path: /cmsportal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-cms-portal
+ servicePort: 9528
+ - path: /detection/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-detection
+ servicePort: 9528
+ - path: /dikongzhixingh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-dikongzhixingh5
+ servicePort: 9528
+ - path: /dispatchh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-dispatchh5
+ servicePort: 9528
+ - path: /emergency/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-emergency-rescue
+ servicePort: 9528
+ - path: /eventsh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-eventsh5
+ servicePort: 9528
+ - path: /flight-control/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-flight-control
+ servicePort: 9528
+ - path: /hljtt/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-hljtt
+ servicePort: 9528
+ - path: /hyper/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-hyperspectral
+ servicePort: 9528
+ - path: /iot/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-iot-manager
+ servicePort: 9528
+ - path: /jiangsuwenlv/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-jiangsuwenlv
+ servicePort: 9528
+ - path: /lite/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-lite
+ servicePort: 9528
+ - path: /logistics/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-logistics
+ servicePort: 9528
+ - path: /media/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-media
+ servicePort: 9528
+ - path: /mianyangbackend/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-mianyangbackend
+ servicePort: 9528
+ - path: /multiterminal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-multiterminal
+ servicePort: 9528
+ - path: /mws/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-mws
+ servicePort: 9528
+ - path: /oms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-oms
+ servicePort: 9528
+ - path: /open/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-open
+ servicePort: 9528
+ - path: /pilot2cloud/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-pilot2-to-cloud
+ servicePort: 9528
+ - path: /qingdao/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-qingdao
+ servicePort: 9528
+ - path: /qinghaitourism/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-qinghaitourism
+ servicePort: 9528
+ - path: /renyike/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-renyike
+ servicePort: 9528
+ - path: /scanner/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-scanner
+ servicePort: 9528
+ - path: /security/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-security
+ servicePort: 9528
+ - path: /securityh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-securityh5
+ servicePort: 9528
+ - path: /seniclive/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-seniclive
+ servicePort: 9528
+ - path: /share/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-share
+ servicePort: 9528
+ - path: /smauth/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-smauth
+ servicePort: 9528
+ - path: /smsecret/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-smsecret
+ servicePort: 9528
+ - path: /splice/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-splice
+ servicePort: 9528
+ - path: /threedsimulation/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-threedsimulation
+ servicePort: 9528
+ - path: /traffic/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-traffic
+ servicePort: 9528
+ - path: /uas/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uas
+ servicePort: 9528
+          - path: /uaskny/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uaskny
+ servicePort: 9528
+ - path: /uasms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uasms
+ servicePort: 9528
+          - path: /uasmskny/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uasmskny
+ servicePort: 9528
+ - path: /visualization/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-visualization
+ servicePort: 9528
+ - path: /uavmsmanager/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-platform-manager
+ servicePort: 9528
+ - path: /secenter/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-platform-security-center
+ servicePort: 9528
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: backend-applications-ingress
+ namespace: sc-my-uav-260202
+ labels:
+ type: backend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: "2.0"
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+spec:
+ rules:
+ - host: cmii-admin-data.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-data
+ servicePort: 8080
+ - host: cmii-admin-gateway.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-gateway
+ servicePort: 8080
+ - host: cmii-admin-user.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-user
+ servicePort: 8080
+ - host: cmii-app-release.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-app-release
+ servicePort: 8080
+ - host: cmii-open-gateway.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-open-gateway
+ servicePort: 8080
+ - host: cmii-sky-converge.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-sky-converge
+ servicePort: 8080
+ - host: cmii-suav-supervision.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-supervision
+ servicePort: 8080
+ - host: cmii-uas-datahub.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-datahub
+ servicePort: 8080
+ - host: cmii-uas-gateway.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-gateway
+ servicePort: 8080
+ - host: cmii-uas-lifecycle.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-lifecycle
+ servicePort: 8080
+ - host: cmii-uav-advanced5g.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-advanced5g
+ servicePort: 8080
+ - host: cmii-uav-airspace.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-airspace
+ servicePort: 8080
+ - host: cmii-uav-alarm.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-alarm
+ servicePort: 8080
+ - host: cmii-uav-autowaypoint.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-autowaypoint
+ servicePort: 8080
+ - host: cmii-uav-brain.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-brain
+ servicePort: 8080
+ - host: cmii-uav-bridge.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-bridge
+ servicePort: 8080
+ - host: cmii-uav-cloud-live.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-cloud-live
+ servicePort: 8080
+ - host: cmii-uav-clusters.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-clusters
+ servicePort: 8080
+ - host: cmii-uav-cms.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-cms
+ servicePort: 8080
+ - host: cmii-uav-data-post-process.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-data-post-process
+ servicePort: 8080
+ - host: cmii-uav-depotautoreturn.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-depotautoreturn
+ servicePort: 8080
+ - host: cmii-uav-developer.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-developer
+ servicePort: 8080
+ - host: cmii-uav-device.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-device
+ servicePort: 8080
+ - host: cmii-uav-emergency.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-emergency
+ servicePort: 8080
+ - host: cmii-uav-fwdd.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-fwdd
+ servicePort: 8080
+ - host: cmii-uav-gateway.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gateway
+ servicePort: 8080
+ - host: cmii-uav-gis-server.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gis-server
+ servicePort: 8080
+ - host: cmii-uav-grid-datasource.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-datasource
+ servicePort: 8080
+ - host: cmii-uav-grid-engine.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-engine
+ servicePort: 8080
+ - host: cmii-uav-grid-manage.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-manage
+ servicePort: 8080
+ - host: cmii-uav-industrial-portfolio.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-industrial-portfolio
+ servicePort: 8080
+ - host: cmii-uav-integration.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-integration
+ servicePort: 8080
+ - host: cmii-uav-iot-dispatcher.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-iot-dispatcher
+ servicePort: 8080
+ - host: cmii-uav-iot-manager.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-iot-manager
+ servicePort: 8080
+ - host: cmii-uav-kpi-monitor.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-kpi-monitor
+ servicePort: 8080
+ - host: cmii-uav-logger.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-logger
+ servicePort: 8080
+ - host: cmii-uav-material-warehouse.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-material-warehouse
+ servicePort: 8080
+ - host: cmii-uav-mission.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-mission
+ servicePort: 8080
+ - host: cmii-uav-mqtthandler.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-mqtthandler
+ servicePort: 8080
+ - host: cmii-uav-multilink.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-multilink
+ servicePort: 8080
+ - host: cmii-uav-notice.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-notice
+ servicePort: 8080
+ - host: cmii-uav-oauth.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-oauth
+ servicePort: 8080
+ - host: cmii-uav-process.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-process
+ servicePort: 8080
+ - host: cmii-uav-sec-awareness.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sec-awareness
+ servicePort: 8080
+ - host: cmii-uav-security-trace.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-security-trace
+ servicePort: 8080
+ - host: cmii-uav-sense-adapter.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sense-adapter
+ servicePort: 8080
+ - host: cmii-uav-surveillance.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-surveillance
+ servicePort: 8080
+ - host: cmii-uav-sync.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sync
+ servicePort: 8080
+ - host: cmii-uav-tcp-server.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-tcp-server
+ servicePort: 8080
+ - host: cmii-uav-threedsimulation.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-threedsimulation
+ servicePort: 8080
+ - host: cmii-uav-tower.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-tower
+ servicePort: 8080
+ - host: cmii-uav-user.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-user
+ servicePort: 8080
+ - host: cmii-uav-watchdog.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-watchdog
+ servicePort: 8080
+ - host: cmii-uav-waypoint.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-waypoint
+ servicePort: 8080
+ - host: cmii-uavms-pyfusion.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-pyfusion
+ servicePort: 8080
+ - host: cmii-uavms-security-center.uavcloud-mianyang.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-security-center
+ servicePort: 8080
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: all-gateways-ingress
+ namespace: sc-my-uav-260202
+ labels:
+ type: api-gateway
+ octopus.control: all-ingress-config-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: "2.0"
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+      proxy_set_header Upgrade $http_upgrade;
+      proxy_set_header Connection "upgrade";
+spec:
+ rules:
+ - host: fake-domain.sc-my-uav-260202.io
+ http:
+ paths:
+ - path: /oms/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-gateway
+ servicePort: 8080
+ - path: /open/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-open-gateway
+ servicePort: 8080
+ - path: /api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gateway
+ servicePort: 8080
+ - path: /uas/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-gateway
+ servicePort: 8080
+ - path: /converge/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-sky-converge
+ servicePort: 8080
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-mongo.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-mongo.yaml
new file mode 100644
index 0000000..bfc6490
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-mongo.yaml
@@ -0,0 +1,78 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mongo
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: "2.0"
+spec:
+ type: NodePort
+ selector:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ ports:
+ - port: 27017
+ name: server-27017
+ targetPort: 27017
+ nodePort: 37017
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-mongo
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: "2.0"
+spec:
+ serviceName: helm-mongo
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ template:
+ metadata:
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: "2.0"
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ containers:
+ - name: helm-mongo
+ image: 192.168.1.4:8033/cmii/mongo:5.0
+ resources: {}
+ ports:
+ - containerPort: 27017
+ name: mongo27017
+ protocol: TCP
+ env:
+ - name: MONGO_INITDB_ROOT_USERNAME
+ value: cmlc
+ - name: MONGO_INITDB_ROOT_PASSWORD
+ value: REdPza8#oVlt
+ volumeMounts:
+ - name: mongo-data
+ mountPath: /data/db
+ readOnly: false
+ subPath: default/helm-mongo/data/db
+ volumes:
+ - name: mongo-data
+ persistentVolumeClaim:
+ claimName: helm-mongo
+---
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-mysql.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-mysql.yaml
new file mode 100644
index 0000000..1ee6339
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-mysql.yaml
@@ -0,0 +1,410 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ annotations: {}
+secrets:
+ - name: helm-mysql
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+type: Opaque
+data:
+ mysql-root-password: "UXpmWFFoZDNiUQ=="
+ mysql-password: "S0F0cm5PckFKNw=="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: primary
+data:
+ my.cnf: |-
+
+ [mysqld]
+ port=3306
+ basedir=/opt/bitnami/mysql
+ datadir=/bitnami/mysql/data
+ pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ log-error=/bitnami/mysql/data/error.log
+ general_log_file = /bitnami/mysql/data/general.log
+ slow_query_log_file = /bitnami/mysql/data/slow.log
+ innodb_data_file_path = ibdata1:512M:autoextend
+ innodb_buffer_pool_size = 512M
+ innodb_buffer_pool_instances = 2
+ innodb_log_file_size = 512M
+ innodb_log_files_in_group = 4
+    # (duplicate innodb_log_files_in_group setting removed; already set to 4 above)
+ log-bin = /bitnami/mysql/data/mysql-bin
+ max_binlog_size=1G
+ transaction_isolation = REPEATABLE-READ
+ default_storage_engine = innodb
+ character-set-server = utf8mb4
+ collation-server=utf8mb4_bin
+ binlog_format = ROW
+ binlog_rows_query_log_events=on
+ binlog_cache_size=4M
+ binlog_expire_logs_seconds = 1296000
+ max_binlog_cache_size=2G
+ gtid_mode = on
+ enforce_gtid_consistency = 1
+ sync_binlog = 1
+ innodb_flush_log_at_trx_commit = 1
+ innodb_flush_method = O_DIRECT
+ log_slave_updates=1
+ relay_log_recovery = 1
+ relay-log-purge = 1
+ default_time_zone = '+08:00'
+ lower_case_table_names=1
+ log_bin_trust_function_creators=1
+ group_concat_max_len=67108864
+ innodb_io_capacity = 4000
+ innodb_io_capacity_max = 8000
+ innodb_flush_sync = 0
+ innodb_flush_neighbors = 0
+ innodb_write_io_threads = 8
+ innodb_read_io_threads = 8
+ innodb_purge_threads = 4
+ innodb_page_cleaners = 4
+ innodb_open_files = 65535
+ innodb_max_dirty_pages_pct = 50
+ innodb_lru_scan_depth = 4000
+ innodb_checksum_algorithm = crc32
+ innodb_lock_wait_timeout = 10
+ innodb_rollback_on_timeout = 1
+ innodb_print_all_deadlocks = 1
+ innodb_file_per_table = 1
+ innodb_online_alter_log_max_size = 4G
+ innodb_stats_on_metadata = 0
+ innodb_thread_concurrency = 0
+ innodb_sync_spin_loops = 100
+ innodb_spin_wait_delay = 30
+ lock_wait_timeout = 3600
+ slow_query_log = 1
+ long_query_time = 10
+ log_queries_not_using_indexes =1
+ log_throttle_queries_not_using_indexes = 60
+ min_examined_row_limit = 100
+ log_slow_admin_statements = 1
+ log_slow_slave_statements = 1
+ default_authentication_plugin=mysql_native_password
+ skip-name-resolve=1
+ explicit_defaults_for_timestamp=1
+ plugin_dir=/opt/bitnami/mysql/plugin
+    # max_allowed_packet=128M was dead config: overridden by the 32M setting below
+ max_connections = 2000
+ max_connect_errors = 1000000
+ table_definition_cache=2000
+ table_open_cache_instances=64
+ tablespace_definition_cache=1024
+ thread_cache_size=256
+ interactive_timeout = 600
+ wait_timeout = 600
+ tmpdir=/opt/bitnami/mysql/tmp
+ max_allowed_packet=32M
+ bind-address=0.0.0.0
+ performance_schema = 1
+ performance_schema_instrument = '%memory%=on'
+ performance_schema_instrument = '%lock%=on'
+ innodb_monitor_enable=ALL
+
+ [mysql]
+ no-auto-rehash
+
+ [mysqldump]
+ quick
+ max_allowed_packet = 32M
+
+ [client]
+ port=3306
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ default-character-set=UTF8
+ plugin_dir=/opt/bitnami/mysql/plugin
+
+ [manager]
+ port=3306
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-mysql-init-scripts
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: primary
+data:
+ create_users_grants_core.sql: |-
+ create user zyly@'%' identified by 'Cmii@451315';
+ grant select on *.* to zyly@'%';
+ create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
+ grant all on *.* to zyly_qc@'%';
+ create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
+ grant all on *.* to k8s_admin@'%';
+ create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
+ grant all on *.* to audit_dba@'%';
+ create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
+ GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
+ create user monitor@'%' identified by 'PL3#nGtrWbf-';
+ grant REPLICATION CLIENT on *.* to monitor@'%';
+ flush privileges;
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: cmii-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.app: mysql
+ cmii.type: middleware
+ octopus.control: mysql-db-wdd
+spec:
+ ports:
+ - name: mysql
+ protocol: TCP
+ port: 13306
+ targetPort: mysql
+ selector:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.app: mysql
+ cmii.type: middleware
+ type: ClusterIP
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mysql-headless
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ annotations: {}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: true
+ ports:
+ - name: mysql
+ port: 3306
+ targetPort: mysql
+ selector:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ annotations: {}
+spec:
+ type: NodePort
+ ports:
+ - name: mysql
+ port: 3306
+ protocol: TCP
+ targetPort: mysql
+ nodePort: 33306
+ selector:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ serviceName: helm-mysql
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ annotations:
+ checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-mysql
+ affinity: {}
+ nodeSelector:
+ mysql-deploy: "true"
+ securityContext:
+ fsGroup: 1001
+ initContainers:
+ - name: change-volume-permissions
+ image: 192.168.1.4:8033/cmii/bitnami-shell:11-debian-11-r136
+ imagePullPolicy: "Always"
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ chown -R 1001:1001 /bitnami/mysql
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - name: mysql-data
+ mountPath: /bitnami/mysql
+ containers:
+ - name: mysql
+ image: 192.168.1.4:8033/cmii/mysql:8.1.0-debian-11-r42
+ imagePullPolicy: "IfNotPresent"
+ securityContext:
+ runAsUser: 1001
+ env:
+ - name: BITNAMI_DEBUG
+ value: "true"
+ - name: MYSQL_ROOT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-mysql
+ key: mysql-root-password
+ - name: MYSQL_DATABASE
+ value: "cmii"
+ ports:
+ - name: mysql
+ containerPort: 3306
+ livenessProbe:
+ failureThreshold: 5
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ readinessProbe:
+ failureThreshold: 5
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ startupProbe:
+ failureThreshold: 60
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: mysql-data
+ mountPath: /bitnami/mysql
+ - name: custom-init-scripts
+ mountPath: /docker-entrypoint-initdb.d
+ - name: config
+ mountPath: /opt/bitnami/mysql/conf/my.cnf
+ subPath: my.cnf
+ volumes:
+ - name: config
+ configMap:
+ name: helm-mysql
+ - name: custom-init-scripts
+ configMap:
+ name: helm-mysql-init-scripts
+ - name: mysql-data
+ hostPath:
+ path: /var/lib/docker/mysql-pv/sc-my-uav-260202/
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-nacos.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-nacos.yaml
new file mode 100644
index 0000000..dc69f9f
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-nacos.yaml
@@ -0,0 +1,130 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-nacos-cm
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: "2.0"
+data:
+ mysql.db.name: "cmii_nacos_config"
+ mysql.db.host: "helm-mysql"
+ mysql.port: "3306"
+ mysql.user: "k8s_admin"
+ mysql.password: "fP#UaH6qQ3)8"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-nacos
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: "2.0"
+spec:
+ type: NodePort
+ selector:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ ports:
+ - port: 8848
+ name: server
+ targetPort: 8848
+      nodePort: 38848  # NOTE(review): outside the default NodePort range 30000-32767; requires kube-apiserver --service-node-port-range to be widened
+ - port: 9848
+ name: server12
+ targetPort: 9848
+ - port: 9849
+ name: server23
+ targetPort: 9849
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-nacos
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: "2.0"
+spec:
+ serviceName: helm-nacos
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ template:
+ metadata:
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/version: "2.0"
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ containers:
+ - name: nacos-server
+ image: 192.168.1.4:8033/cmii/nacos-server:v2.1.2
+ ports:
+ - containerPort: 8848
+ name: dashboard
+ - containerPort: 9848
+ name: tcp-9848
+ - containerPort: 9849
+ name: tcp-9849
+ env:
+ - name: NACOS_AUTH_ENABLE
+ value: "false"
+ - name: NACOS_REPLICAS
+ value: "1"
+ - name: MYSQL_SERVICE_DB_NAME
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.db.name
+ - name: MYSQL_SERVICE_PORT
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.port
+ - name: MYSQL_SERVICE_USER
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.user
+ - name: MYSQL_SERVICE_PASSWORD
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.password
+ - name: MYSQL_SERVICE_HOST
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.db.host
+ - name: NACOS_SERVER_PORT
+ value: "8848"
+ - name: NACOS_APPLICATION_PORT
+ value: "8848"
+ - name: PREFER_HOST_MODE
+ value: "hostname"
+ - name: MODE
+ value: standalone
+ - name: SPRING_DATASOURCE_PLATFORM
+ value: mysql
+---
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-nfs-test.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-nfs-test.yaml
new file mode 100644
index 0000000..315bde1
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-nfs-test.yaml
@@ -0,0 +1,38 @@
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: test-claim
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" #与nfs-StorageClass.yaml metadata.name保持一致
+spec:
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: nfs-prod-distribute
+ resources:
+ requests:
+ storage: 1Mi
+---
+kind: Pod
+apiVersion: v1
+metadata:
+ name: test-pod
+spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: test-pod
+ image: 192.168.1.4:8033/cmii/busybox:latest
+ command:
+ - "/bin/sh"
+ args:
+ - "-c"
+ - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
+ volumeMounts:
+ - name: nfs-pvc
+ mountPath: "/mnt"
+ restartPolicy: "Never"
+ volumes:
+ - name: nfs-pvc
+ persistentVolumeClaim:
+ claimName: test-claim #与PVC名称保持一致
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-nfs.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-nfs.yaml
new file mode 100644
index 0000000..58d6b15
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-nfs.yaml
@@ -0,0 +1,114 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system #根据实际环境设定namespace,下面类同
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: nfs-client-provisioner-runner
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: run-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+# name: nfs-client-provisioner-runner
+  name: cluster-admin  # NOTE(review): grants full cluster-admin to the provisioner; prefer the scoped nfs-client-provisioner-runner role commented out above
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: Role
+ name: leader-locking-nfs-client-provisioner
+ apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: nfs-prod-distribute
+provisioner: cmlc-nfs-storage #这里的名称要和provisioner配置文件中的环境变量PROVISIONER_NAME保持一致 (NOTE(review): the text 'parameters: archiveOnDelete: "false"' was accidentally merged into this comment — the StorageClass has lost its parameters block; restore 'parameters:' / '  archiveOnDelete: "false"' as real YAML lines if archiving behavior was intended)
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nfs-client-provisioner
+ labels:
+ app: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system #与RBAC文件中的namespace保持一致
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nfs-client-provisioner
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ app: nfs-client-provisioner
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: nfs-client-provisioner
+ containers:
+ - name: nfs-client-provisioner
+ image: 192.168.1.4:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
+ volumeMounts:
+ - name: nfs-client-root
+ mountPath: /persistentvolumes
+ env:
+ - name: PROVISIONER_NAME
+ value: cmlc-nfs-storage
+ - name: NFS_SERVER
+ value: 192.168.1.6
+ - name: NFS_PATH
+ value: /var/lib/docker/nfs_data
+ volumes:
+ - name: nfs-client-root
+ nfs:
+ server: 192.168.1.6
+ path: /var/lib/docker/nfs_data
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-pvc.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-pvc.yaml
new file mode 100644
index 0000000..22e71e1
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-pvc.yaml
@@ -0,0 +1,76 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: nfs-backend-log-pvc
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware-base
+ cmii.app: nfs-backend-log-pvc
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: "2.0"
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 100Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-emqxs
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: "2.0"
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 20Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-mongo
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-mongo
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: "2.0"
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 30Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-rabbitmq
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: "2.0"
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 20Gi
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-rabbitmq.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-rabbitmq.yaml
new file mode 100644
index 0000000..26e743e
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-rabbitmq.yaml
@@ -0,0 +1,328 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+automountServiceAccountToken: true
+secrets:
+ - name: helm-rabbitmq
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+type: Opaque
+data:
+ rabbitmq-password: "blljUk45MXIuX2hq"
+ rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-rabbitmq-config
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+data:
+ rabbitmq.conf: |-
+ ## Username and password
+ ##
+ default_user = admin
+ default_pass = nYcRN91r._hj
+ ## Clustering
+ ##
+ cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
+ cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
+ cluster_formation.node_cleanup.interval = 10
+ cluster_formation.node_cleanup.only_log_warning = true
+ cluster_partition_handling = autoheal
+ # queue master locator
+ queue_master_locator = min-masters
+ # enable guest user
+ loopback_users.guest = false
+ #default_vhost = default-vhost
+ #disk_free_limit.absolute = 50MB
+ #load_definitions = /app/load_definition.json
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-rabbitmq-endpoint-reader
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-rabbitmq-endpoint-reader
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+subjects:
+ - kind: ServiceAccount
+ name: helm-rabbitmq
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: helm-rabbitmq-endpoint-reader
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-rabbitmq-headless
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ clusterIP: None
+ ports:
+ - name: epmd
+ port: 4369
+ targetPort: epmd
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+ - name: dist
+ port: 25672
+ targetPort: dist
+ - name: dashboard
+ port: 15672
+      targetPort: dashboard  # fixed: the container port is named 'dashboard' (no 'stats' port exists in this StatefulSet), so 'targetPort: stats' could never resolve
+ selector:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: sc-my-uav-260202
+ publishNotReadyAddresses: true
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ type: NodePort
+ ports:
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+      nodePort: 35672  # NOTE(review): outside the default NodePort range 30000-32767
+    - name: dashboard
+      port: 15672
+      targetPort: dashboard
+      nodePort: 36675  # NOTE(review): outside the default NodePort range 30000-32767
+ selector:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: sc-my-uav-260202
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ serviceName: helm-rabbitmq-headless
+ podManagementPolicy: OrderedReady
+ replicas: 1
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: sc-my-uav-260202
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+ annotations:
+ checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
+ checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-rabbitmq
+ affinity: {}
+ securityContext:
+ fsGroup: 5001
+ runAsUser: 5001
+ terminationGracePeriodSeconds: 120
+ initContainers:
+ - name: volume-permissions
+ image: 192.168.1.4:8033/cmii/bitnami-shell:11-debian-11-r136
+ imagePullPolicy: "Always"
+ command:
+ - /bin/bash
+ args:
+ - -ec
+ - |
+ mkdir -p "/bitnami/rabbitmq/mnesia"
+ chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
+ securityContext:
+ runAsUser: 0
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: data
+ mountPath: /bitnami/rabbitmq/mnesia
+ containers:
+ - name: rabbitmq
+ image: 192.168.1.4:8033/cmii/rabbitmq:3.9.12-debian-10-r3
+ imagePullPolicy: "Always"
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: K8S_SERVICE_NAME
+ value: "helm-rabbitmq-headless"
+ - name: K8S_ADDRESS_TYPE
+ value: hostname
+ - name: RABBITMQ_FORCE_BOOT
+ value: "no"
+ - name: RABBITMQ_NODE_NAME
+ value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+ - name: K8S_HOSTNAME_SUFFIX
+ value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+ - name: RABBITMQ_MNESIA_DIR
+ value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
+ - name: RABBITMQ_LDAP_ENABLE
+ value: "no"
+ - name: RABBITMQ_LOGS
+ value: "-"
+ - name: RABBITMQ_ULIMIT_NOFILES
+ value: "65536"
+ - name: RABBITMQ_USE_LONGNAME
+ value: "true"
+ - name: RABBITMQ_ERL_COOKIE
+ valueFrom:
+ secretKeyRef:
+ name: helm-rabbitmq
+ key: rabbitmq-erlang-cookie
+ - name: RABBITMQ_LOAD_DEFINITIONS
+ value: "no"
+ - name: RABBITMQ_SECURE_PASSWORD
+ value: "yes"
+ - name: RABBITMQ_USERNAME
+ value: "admin"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-rabbitmq
+ key: rabbitmq-password
+ - name: RABBITMQ_PLUGINS
+ value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
+ ports:
+ - name: amqp
+ containerPort: 5672
+ - name: dist
+ containerPort: 25672
+ - name: dashboard
+ containerPort: 15672
+ - name: epmd
+ containerPort: 4369
+ livenessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q ping
+ initialDelaySeconds: 120
+ periodSeconds: 30
+ timeoutSeconds: 20
+ successThreshold: 1
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
+ /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
+ else
+ rabbitmqctl stop_app
+ fi
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: configuration
+ mountPath: /bitnami/rabbitmq/conf
+ - name: data
+ mountPath: /bitnami/rabbitmq/mnesia
+ volumes:
+ - name: configuration
+ configMap:
+ name: helm-rabbitmq-config
+ items:
+ - key: rabbitmq.conf
+ path: rabbitmq.conf
+ - name: data
+ persistentVolumeClaim:
+ claimName: helm-rabbitmq
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-redis.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-redis.yaml
new file mode 100644
index 0000000..d557ead
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-redis.yaml
@@ -0,0 +1,585 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+automountServiceAccountToken: true
+metadata:
+ name: helm-redis
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-redis
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+type: Opaque
+data:
+ redis-password: "TWNhY2hlQDQ1MjI="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-configuration
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+data:
+ redis.conf: |-
+ # User-supplied common configuration:
+ # Enable AOF https://redis.io/topics/persistence#append-only-file
+ appendonly yes
+ # Disable RDB persistence, AOF persistence already enabled.
+ save ""
+ # End of common configuration
+ master.conf: |-
+ dir /data
+ # User-supplied master configuration:
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+ # End of master configuration
+ replica.conf: |-
+ dir /data
+ slave-read-only yes
+ # User-supplied replica configuration:
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+ # End of replica configuration
+---
+# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-health
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+data:
+ ping_readiness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+ ping_liveness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+---
+# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-scripts
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+data:
+ start-master.sh: |
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
+ exec redis-server "${ARGS[@]}"
+ start-replica.sh: |
+ #!/bin/bash
+
+ get_port() {
+ hostname="$1"
+ type="$2"
+
+ port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
+ port=${!port_var}
+
+ if [ -z "$port" ]; then
+ case $type in
+ "SENTINEL")
+ echo 26379
+ ;;
+ "REDIS")
+ echo 6379
+ ;;
+ esac
+ else
+ echo $port
+ fi
+ }
+
+ get_full_hostname() {
+ hostname="$1"
+ echo "${hostname}.${HEADLESS_SERVICE}"
+ }
+
+ REDISPORT=$(get_port "$HOSTNAME" "REDIS")
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+
+ echo "" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
+ exec redis-server "${ARGS[@]}"
+---
+# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-headless
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-my-uav-260202
+---
+# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-master
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+spec:
+ type: ClusterIP
+
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ nodePort: null
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+---
+# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-replicas
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+spec:
+ type: ClusterIP
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ nodePort: null
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/component: replica
+---
+# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-redis-master
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+ serviceName: helm-redis-headless
+ updateStrategy:
+ rollingUpdate: {}
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ spec:
+ affinity: {}
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: helm-redis
+ imagePullSecrets:
+ - name: harborsecret
+ terminationGracePeriodSeconds: 30
+ containers:
+ - name: redis
+ image: 192.168.1.4:8033/cmii/redis:6.2.14-debian-11-r1
+ imagePullPolicy: "Always"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-master.sh
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: master
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ # One second longer than command timeout should prevent generation of zombie processes.
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local.sh 5
+ readinessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 2
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local.sh 1
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: "2"
+ memory: 8Gi
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ - name: redis-data
+ mountPath: /data
+              subPath:  # NOTE(review): empty subPath (likely a Helm template leftover) — remove the key or give it a value
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc/
+ - name: tmp
+ mountPath: /tmp
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: helm-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: helm-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: helm-redis-configuration
+ - name: redis-tmp-conf
+ emptyDir: {}
+ - name: tmp
+ emptyDir: {}
+ - name: redis-data
+ emptyDir: {}
+---
+# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-redis-replicas
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+spec:
+ replicas: 0
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/component: replica
+ serviceName: helm-redis-headless
+ updateStrategy:
+ rollingUpdate: {}
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: helm-redis
+ terminationGracePeriodSeconds: 30
+ containers:
+ - name: redis
+ image: 192.168.1.4:8033/cmii/redis:6.2.14-debian-11-r1
+ imagePullPolicy: "Always"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-replica.sh
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: slave
+ - name: REDIS_MASTER_HOST
+ value: helm-redis-master-0.helm-redis-headless.sc-my-uav-260202.svc.cluster.local
+ - name: REDIS_MASTER_PORT_NUMBER
+ value: "6379"
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_MASTER_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local_and_master.sh 5
+ readinessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 2
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local_and_master.sh 1
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: "2"
+ memory: 8Gi
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ - name: redis-data
+ mountPath: /data
+              subPath:  # NOTE(review): empty subPath (likely a Helm template leftover) — remove the key or give it a value
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: helm-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: helm-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: helm-redis-configuration
+ - name: redis-tmp-conf
+ emptyDir: {}
+ - name: redis-data
+ emptyDir: {}
+
diff --git a/72-202602-绵阳飞服/k8s-deploy/k8s-srs.yaml b/72-202602-绵阳飞服/k8s-deploy/k8s-srs.yaml
new file mode 100644
index 0000000..dd5e2cc
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-deploy/k8s-srs.yaml
@@ -0,0 +1,496 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: helm-live-srs-cm
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: live-srs
+ cmii.type: live
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ helm.sh/chart: cmlc-live-srs-rtc-2.0.0
+data:
+ srs.rtc.conf: |-
+ listen 31935;
+ max_connections 4096;
+ srs_log_tank console;
+ srs_log_level info;
+ srs_log_file /home/srs.log;
+ daemon off;
+ http_api {
+ enabled on;
+ listen 1985;
+ crossdomain on;
+ }
+ stats {
+ network 0;
+ }
+ http_server {
+ enabled on;
+ listen 8080;
+ dir /home/hls;
+ }
+ srt_server {
+ enabled on;
+ listen 30556;
+ maxbw 1000000000;
+ connect_timeout 4000;
+ peerlatency 600;
+ recvlatency 600;
+ }
+ rtc_server {
+ enabled on;
+ listen 30090;
+ candidate $CANDIDATE;
+ }
+ vhost __defaultVhost__ {
+ http_hooks {
+ enabled on;
+ on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
+ }
+ http_remux {
+ enabled on;
+ }
+ rtc {
+ enabled on;
+ rtmp_to_rtc on;
+ rtc_to_rtmp on;
+ keep_bframe off;
+ }
+ tcp_nodelay on;
+ min_latency on;
+ play {
+ gop_cache off;
+ mw_latency 100;
+ mw_msgs 10;
+ }
+ publish {
+ firstpkt_timeout 8000;
+ normal_timeout 4000;
+ mr on;
+ }
+ dvr {
+ enabled off;
+ dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
+ dvr_plan session;
+ }
+ hls {
+ enabled on;
+ hls_path /home/hls;
+ hls_fragment 10;
+ hls_window 60;
+ hls_m3u8_file [app]/[stream].m3u8;
+ hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
+ hls_cleanup on;
+ hls_entry_prefix http://36.133.66.183:8088;
+ }
+ }
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srs-svc-exporter
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: rtmp
+ protocol: TCP
+ port: 31935
+ targetPort: 31935
+ nodePort: 31935
+ - name: rtc
+ protocol: UDP
+ port: 30090
+ targetPort: 30090
+ nodePort: 30090
+ - name: rtc-tcp
+ protocol: TCP
+ port: 30090
+ targetPort: 30090
+ nodePort: 30090
+ - name: srt
+ protocol: UDP
+ port: 30556
+ targetPort: 30556
+ nodePort: 30556
+ - name: api
+ protocol: TCP
+ port: 1985
+ targetPort: 1985
+ nodePort: 30080
+ selector:
+ srs-role: rtc
+ type: NodePort
+ sessionAffinity: None
+ externalTrafficPolicy: Cluster
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srs-svc
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8080
+ targetPort: 8080
+ - name: api
+ protocol: TCP
+ port: 1985
+ targetPort: 1985
+ selector:
+ srs-role: rtc
+ type: ClusterIP
+ sessionAffinity: None
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srsrtc-svc
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: rtmp
+ protocol: TCP
+ port: 31935
+ targetPort: 31935
+ selector:
+ srs-role: rtc
+ type: ClusterIP
+ sessionAffinity: None
+
+---
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: helm-live-srs-rtc
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-srs
+ cmii.type: live
+ helm.sh/chart: cmlc-live-srs-rtc-2.0.0
+ srs-role: rtc
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ srs-role: rtc
+ template:
+ metadata:
+ labels:
+ srs-role: rtc
+ spec:
+ volumes:
+ - name: srs-conf-file
+ configMap:
+ name: helm-live-srs-cm
+ items:
+ - key: srs.rtc.conf
+ path: docker.conf
+ defaultMode: 420
+ - name: srs-vol
+ emptyDir:
+ sizeLimit: 8Gi
+ containers:
+ - name: srs-rtc
+ image: 192.168.1.4:8033/cmii/srs:v5.0.195
+ ports:
+ - name: srs-rtmp
+ containerPort: 31935
+ protocol: TCP
+ - name: srs-api
+ containerPort: 1985
+ protocol: TCP
+ - name: srs-flv
+ containerPort: 8080
+ protocol: TCP
+ - name: srs-webrtc
+ containerPort: 30090
+ protocol: UDP
+ - name: srs-webrtc-tcp
+ containerPort: 30090
+ protocol: TCP
+ - name: srs-srt
+ containerPort: 30556
+ protocol: UDP
+ env:
+ - name: CANDIDATE
+ value: 36.133.66.183
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-conf-file
+ mountPath: /usr/local/srs/conf/docker.conf
+ subPath: docker.conf
+ - name: srs-vol
+ mountPath: /home/dvr
+ subPath: sc-my-uav-260202/helm-live/dvr
+ - name: srs-vol
+ mountPath: /home/hls
+ subPath: sc-my-uav-260202/helm-live/hls
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ - name: oss-adaptor
+ image: 192.168.1.4:8033/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
+ env:
+ - name: OSS_ENDPOINT
+ value: 'http://helm-minio:9000'
+ - name: OSS_AK
+ value: cmii
+ - name: OSS_SK
+ value: 'B#923fC7mk'
+ - name: OSS_BUCKET
+ value: live-cluster-hls
+ - name: SRS_OP
+ value: 'http://helm-live-op-svc-v2:8080'
+ - name: MYSQL_ENDPOINT
+ value: 'helm-mysql:3306'
+ - name: MYSQL_USERNAME
+ value: k8s_admin
+ - name: MYSQL_PASSWORD
+ value: fP#UaH6qQ3)8
+ - name: MYSQL_DATABASE
+ value: cmii_live_srs_op
+ - name: MYSQL_TABLE
+ value: live_segment
+ - name: LOG_LEVEL
+ value: info
+ - name: OSS_META
+ value: 'yes'
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-vol
+ mountPath: /cmii/share/hls
+ subPath: sc-my-uav-260202/helm-live/hls
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ schedulerName: default-scheduler
+ serviceName: helm-live-srsrtc-svc
+ podManagementPolicy: OrderedReady
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ partition: 0
+ revisionHistoryLimit: 10
+---
+# live-srs部分
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: helm-live-op-v2
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-engine
+ cmii.type: live
+ helm.sh/chart: cmlc-live-live-op-2.0.0
+ live-role: op-v2
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ live-role: op-v2
+ template:
+ metadata:
+ labels:
+ live-role: op-v2
+ spec:
+ volumes:
+ - name: srs-conf-file
+ configMap:
+ name: helm-live-op-cm-v2
+ items:
+ - key: live.op.conf
+ path: bootstrap.yaml
+ defaultMode: 420
+ containers:
+ - name: helm-live-op-v2
+ image: 192.168.1.4:8033/cmii/cmii-live-operator:5.2.0
+ ports:
+ - name: operator
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 4800m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-conf-file
+ mountPath: /cmii/bootstrap.yaml
+ subPath: bootstrap.yaml
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ schedulerName: default-scheduler
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 25%
+ maxSurge: 25%
+ revisionHistoryLimit: 10
+ progressDeadlineSeconds: 600
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-op-svc-v2
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ nodePort: 30333
+ selector:
+ live-role: op-v2
+ type: NodePort
+ sessionAffinity: None
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-op-svc
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ live-role: op
+ type: ClusterIP
+ sessionAffinity: None
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: helm-live-op-cm-v2
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-engine
+ cmii.type: live
+data:
+ live.op.conf: |-
+ server:
+ port: 8080
+ spring:
+ main:
+ allow-bean-definition-overriding: true
+ allow-circular-references: true
+ application:
+ name: cmii-live-operator
+ platform:
+ info:
+ name: cmii-live-operator
+ description: cmii-live-operator
+ version: 2.0
+ scanPackage: com.cmii.live.op
+ cloud:
+ nacos:
+ config:
+ username: nacos
+ password: KingKong@95461234
+ server-addr: helm-nacos:8848
+ extension-configs:
+ - data-id: cmii-live-operator.yml
+ group: 2.0
+ refresh: true
+ shared-configs:
+ - data-id: cmii-backend-system.yml
+ group: 2.0
+ refresh: true
+ discovery:
+ enabled: false
+
+ live:
+ engine:
+ type: srs
+ endpoint: 'http://helm-live-srs-svc:1985'
+ proto:
+ rtmp: 'rtmp://36.133.66.183:31935'
+ rtsp: 'rtsp://36.133.66.183:30554'
+ srt: 'srt://36.133.66.183:30556'
+ flv: 'http://36.133.66.183:30500'
+ hls: 'http://36.133.66.183:30500'
+ rtc: 'webrtc://36.133.66.183:30080'
+ replay: 'https://36.133.66.183:30333'
+ minio:
+ endpoint: http://helm-minio:9000
+ access-key: cmii
+ secret-key: B#923fC7mk
+ bucket: live-cluster-hls
diff --git a/72-202602-绵阳飞服/k8s-middle/helm-influxdb.yaml b/72-202602-绵阳飞服/k8s-middle/helm-influxdb.yaml
new file mode 100644
index 0000000..9a0e796
--- /dev/null
+++ b/72-202602-绵阳飞服/k8s-middle/helm-influxdb.yaml
@@ -0,0 +1,280 @@
+---
+# Source: influxdb/templates/networkpolicy.yaml
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: helm-influxdb
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/instance: helm-influxdb
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/name: influxdb
+ app.kubernetes.io/version: 2.7.11
+ helm.sh/chart: influxdb-6.6.11
+spec:
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/instance: helm-influxdb
+ app.kubernetes.io/name: influxdb
+ app.kubernetes.io/component: influxdb
+ policyTypes:
+ - Ingress
+ - Egress
+ egress:
+ - {}
+ ingress:
+ # Allow inbound connections
+ - ports:
+ - port: 8086
+ protocol: TCP
+ - port: 8088
+ protocol: TCP
+---
+# Source: influxdb/templates/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-influxdb
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/instance: helm-influxdb
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/name: influxdb
+ app.kubernetes.io/version: 2.7.11
+ helm.sh/chart: influxdb-6.6.11
+ app.kubernetes.io/component: influxdb
+automountServiceAccountToken: false
+---
+# Source: influxdb/templates/secrets.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-influxdb
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/instance: helm-influxdb
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/name: influxdb
+ app.kubernetes.io/version: 2.7.11
+ helm.sh/chart: influxdb-6.6.11
+type: Opaque
+data:
+ admin-user-password: "WTFjJVJoI2ZIMw=="
+ admin-user-token: "WXVubkhKQVNBQWRqMjNyYXNRQVdkNjIxZXJHQVM4MmthcWo="
+---
+# Source: influxdb/templates/pvc.yaml
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: helm-influxdb
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/instance: helm-influxdb
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/name: influxdb
+ app.kubernetes.io/version: 2.7.11
+ helm.sh/chart: influxdb-6.6.11
+ app.kubernetes.io/component: influxdb
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "8Gi"
+---
+# Source: influxdb/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-influxdb
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/instance: helm-influxdb
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/name: influxdb
+ app.kubernetes.io/version: 2.7.11
+ helm.sh/chart: influxdb-6.6.11
+ app.kubernetes.io/component: influxdb
+spec:
+ type: ClusterIP
+ sessionAffinity: None
+ ports:
+ - port: 8086
+ targetPort: http
+ protocol: TCP
+ name: http
+ nodePort: null
+ - port: 8088
+ targetPort: rpc
+ protocol: TCP
+ name: rpc
+ nodePort: null
+ selector:
+ app.kubernetes.io/instance: helm-influxdb
+ app.kubernetes.io/name: influxdb
+ app.kubernetes.io/component: influxdb
+---
+# Source: influxdb/templates/deployment.yaml
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: helm-influxdb
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/component: influxdb
+ app.kubernetes.io/instance: helm-influxdb
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/name: influxdb
+ app.kubernetes.io/version: 2.7.11
+ helm.sh/chart: influxdb-6.6.11
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: influxdb
+ app.kubernetes.io/instance: helm-influxdb
+ app.kubernetes.io/name: influxdb
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ app.kubernetes.io/component: influxdb
+ app.kubernetes.io/instance: helm-influxdb
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/name: influxdb
+ app.kubernetes.io/version: 2.7.11
+ helm.sh/chart: influxdb-6.6.11
+ spec:
+ volumes:
+ - name: empty-dir
+ emptyDir: {}
+ - name: influxdb-credentials
+ secret:
+ secretName: helm-influxdb
+ defaultMode: 420
+ - name: data
+ persistentVolumeClaim:
+ claimName: helm-influxdb
+ containers:
+ - name: influxdb
+ image: 192.168.1.4:8033/cmii/influxdb:2.7.11-debian-12-r19
+ ports:
+ - name: http
+ containerPort: 8086
+ protocol: TCP
+ - name: rpc
+ containerPort: 8088
+ protocol: TCP
+ env:
+ - name: BITNAMI_DEBUG
+ value: 'true'
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: INFLUXDB_HTTP_AUTH_ENABLED
+ value: 'true'
+ - name: INFLUXDB_CREATE_USER_TOKEN
+ value: 'no'
+ - name: INFLUXDB_ADMIN_USER
+ value: cmlc
+ - name: INFLUXDB_ADMIN_USER_PASSWORD_FILE
+ value: /opt/bitnami/influxdb/secrets/admin-user-password
+ - name: INFLUXDB_ADMIN_USER_TOKEN_FILE
+ value: /opt/bitnami/influxdb/secrets/admin-user-token
+ - name: INFLUXDB_ADMIN_BUCKET
+ value: home
+ - name: INFLUXDB_ADMIN_ORG
+ value: docs
+ resources:
+ limits:
+ cpu: '4'
+ ephemeral-storage: 4Gi
+ memory: 4Gi
+ requests:
+ cpu: '2'
+ ephemeral-storage: 50Mi
+ memory: 4Gi
+ volumeMounts:
+ - name: empty-dir
+ mountPath: /tmp
+ subPath: tmp-dir
+ - name: empty-dir
+ mountPath: /opt/bitnami/influxdb/etc
+ subPath: app-conf-dir
+ - name: influxdb-credentials
+ mountPath: /opt/bitnami/influxdb/secrets/
+ - name: data
+ mountPath: /bitnami/influxdb
+ livenessProbe:
+ httpGet:
+ path: /
+ port: http
+ scheme: HTTP
+ initialDelaySeconds: 180
+ timeoutSeconds: 30
+ periodSeconds: 45
+ successThreshold: 1
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - bash
+ - '-c'
+ - |
+ . /opt/bitnami/scripts/libinfluxdb.sh
+
+ influxdb_env
+ export INFLUX_USERNAME="$INFLUXDB_ADMIN_USER"
+ export INFLUX_PASSWORD="$INFLUXDB_ADMIN_USER_PASSWORD"
+
+ timeout 29s influx ping --host http://$POD_IP:8086
+ initialDelaySeconds: 120
+ timeoutSeconds: 30
+ periodSeconds: 45
+ successThreshold: 1
+ failureThreshold: 6
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ privileged: false
+ seLinuxOptions: {}
+ runAsUser: 1001
+ runAsGroup: 1001
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ serviceAccountName: helm-influxdb
+ serviceAccount: helm-influxdb
+ securityContext:
+ fsGroup: 1001
+ fsGroupChangePolicy: Always
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - mianyang
+ schedulerName: default-scheduler
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 25%
+ maxSurge: 25%
+ revisionHistoryLimit: 10
+ progressDeadlineSeconds: 600
+
diff --git a/72-202602-绵阳飞服/rke-13014-cluster.yml b/72-202602-绵阳飞服/rke-13014-cluster.yml
new file mode 100644
index 0000000..160302d
--- /dev/null
+++ b/72-202602-绵阳飞服/rke-13014-cluster.yml
@@ -0,0 +1,252 @@
+nodes:
+ - address: 192.168.1.4
+ user: root
+ role:
+ - controlplane
+ - etcd
+ - worker
+ internal_address: 192.168.1.4
+ hostname_override: "master-192-168-1-4"
+ labels:
+      ingress-deploy: "true"
+ uavcloud.env: mianyang
+ - address: 192.168.1.3
+ user: root
+ role:
+ - worker
+ internal_address: 192.168.1.3
+ hostname_override: "1-worker-192-168-1-3"
+ labels:
+ uavcloud.env: mianyang
+ - address: 192.168.1.5
+ user: root
+ role:
+ - worker
+ internal_address: 192.168.1.5
+ hostname_override: "2-worker-192-168-1-5"
+ labels:
+ uavcloud.env: mianyang
+ - address: 192.168.1.2
+ user: root
+ role:
+ - worker
+ internal_address: 192.168.1.2
+ hostname_override: "3-mysql-192-168-1-2"
+ labels:
+      mysql-deploy: "true"
+ - address: 192.168.1.6
+ user: root
+ role:
+ - worker
+ internal_address: 192.168.1.6
+ hostname_override: "4-storage-192-168-1-6"
+ labels:
+      minio-deploy: "true"
+      doris-deploy: "true"
+
+
+authentication:
+ strategy: x509
+ sans:
+ - "192.168.1.4"
+ - "36.133.66.183"
+
+private_registries:
+ - url: 192.168.1.4:8033 # 私有镜像库地址
+ user: admin
+ password: "V2ryStr@ngPss"
+ is_default: true
+
+##############################################################################
+
+# 默认值为false,如果设置为true,当发现不支持的Docker版本时,RKE不会报错
+ignore_docker_version: true
+
+# Set the name of the Kubernetes cluster
+cluster_name: rke-cluster
+
+kubernetes_version: v1.30.14-rancher1-1
+
+ssh_key_path: /root/.ssh/id_ed25519
+
+# Enable running cri-dockerd
+# Up to Kubernetes 1.23, kubelet contained code called dockershim
+# to support Docker runtime. The replacement is called cri-dockerd
+# and should be enabled if you want to keep using Docker as your
+# container runtime
+# Only available to enable in Kubernetes 1.21 and higher
+enable_cri_dockerd: true
+
+services:
+ etcd:
+ backup_config:
+ enabled: false
+ interval_hours: 72
+ retention: 3
+ safe_timestamp: false
+ timeout: 300
+ creation: 12h
+ extra_args:
+ election-timeout: 5000
+ heartbeat-interval: 500
+ cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
+ gid: 0
+ retention: 72h
+ snapshot: false
+ uid: 0
+
+ kube-api:
+ # IP range for any services created on Kubernetes
+ # This must match the service_cluster_ip_range in kube-controller
+ service_cluster_ip_range: 10.74.0.0/16
+ # Expose a different port range for NodePort services
+ service_node_port_range: 30000-40000
+ always_pull_images: false
+ pod_security_policy: false
+ # Add additional arguments to the kubernetes API server
+ # This WILL OVERRIDE any existing defaults
+ extra_args:
+ # Enable audit log to stdout
+ audit-log-path: "-"
+ # Increase number of delete workers
+ delete-collection-workers: 3
+ # Set the level of log output to warning-level
+ v: 1
+ kube-controller:
+ # CIDR pool used to assign IP addresses to pods in the cluster
+ cluster_cidr: 172.96.0.0/16
+ # IP range for any services created on Kubernetes
+ # This must match the service_cluster_ip_range in kube-api
+ service_cluster_ip_range: 10.74.0.0/16
+ # Add additional arguments to the kubernetes API server
+ # This WILL OVERRIDE any existing defaults
+ extra_args:
+ # Set the level of log output to debug-level
+ v: 1
+ # Enable RotateKubeletServerCertificate feature gate
+ feature-gates: RotateKubeletServerCertificate=true
+ # Enable TLS Certificates management
+ # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
+ cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
+ cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
+ kubelet:
+ # Base domain for the cluster
+ cluster_domain: cluster.local
+ # IP address for the DNS service endpoint
+ cluster_dns_server: 10.74.0.10
+ # Fail if swap is on
+ fail_swap_on: false
+ # Set max pods to 250 instead of default 110
+ extra_binds:
+ - "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
+ extra_args:
+ max-pods: 122
+ # Optionally define additional volume binds to a service
+ scheduler:
+ extra_args:
+ # Set the level of log output to warning-level
+ v: 0
+ tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
+ kubeproxy:
+ extra_args:
+ # Set the level of log output to warning-level
+ v: 1
+
+authorization:
+ mode: rbac
+
+addon_job_timeout: 30
+
+# Specify network plugin-in (canal, calico, flannel, weave, or none)
+network:
+ mtu: 1440
+ options:
+ flannel_backend_type: vxlan
+ plugin: calico
+ tolerations:
+ - key: "node.kubernetes.io/unreachable"
+ operator: "Exists"
+ effect: "NoExecute"
+ tolerationseconds: 300
+ - key: "node.kubernetes.io/not-ready"
+ operator: "Exists"
+ effect: "NoExecute"
+ tolerationseconds: 300
+
+# Specify DNS provider (coredns or kube-dns)
+dns:
+ provider: coredns
+ nodelocal: {}
+ # Available as of v1.1.0
+ update_strategy:
+ strategy: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 20%
+ maxSurge: 15%
+ linear_autoscaler_params:
+ cores_per_replica: 0.34
+ nodes_per_replica: 4
+ prevent_single_point_failure: true
+ min: 2
+ max: 3
+ tolerations:
+ - key: "node.kubernetes.io/unreachable"
+ operator: "Exists"
+ effect: "NoExecute"
+ tolerationseconds: 300
+ - key: "node.kubernetes.io/not-ready"
+ operator: "Exists"
+ effect: "NoExecute"
+ tolerationseconds: 300
+
+# Specify monitoring provider (metrics-server)
+monitoring:
+ provider: metrics-server
+ # Available as of v1.1.0
+ update_strategy:
+ strategy: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 8
+
+ingress:
+ provider: nginx
+ default_backend: true
+ http_port: 30500
+ https_port: 31500
+ extra_envs:
+ - name: TZ
+ value: Asia/Shanghai
+ node_selector:
+    ingress-deploy: "true"
+ options:
+ use-forwarded-headers: "true"
+ access-log-path: /var/log/nginx/access.log
+ client-body-timeout: '6000'
+ compute-full-forwarded-for: 'true'
+ enable-underscores-in-headers: 'true'
+ log-format-escape-json: 'true'
+ log-format-upstream: >-
+ { "msec": "$msec", "connection": "$connection", "connection_requests":
+ "$connection_requests", "pid": "$pid", "request_id": "$request_id",
+ "request_length": "$request_length", "remote_addr": "$remote_addr",
+ "remote_user": "$remote_user", "remote_port": "$remote_port",
+ "http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
+ "$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
+ "request_uri": "$request_uri", "args": "$args", "status": "$status",
+ "body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
+ "http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
+ "http_host": "$http_host", "server_name": "$server_name", "request_time":
+ "$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
+ "$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
+ "upstream_response_time": "$upstream_response_time",
+ "upstream_response_length": "$upstream_response_length",
+ "upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
+ "$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
+ "request_method": "$request_method", "server_protocol": "$server_protocol",
+ "pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
+ "geoip_country_code": "$geoip_country_code" }
+ proxy-body-size: 5120m
+ proxy-read-timeout: '6000'
+ proxy-send-timeout: '6000'
+
+
diff --git a/998-常用脚本/a-Agent-WDD运行/啊-批量命令.sh b/998-常用脚本/a-Agent-WDD运行/a-批量命令.sh
similarity index 66%
rename from 998-常用脚本/a-Agent-WDD运行/啊-批量命令.sh
rename to 998-常用脚本/a-Agent-WDD运行/a-批量命令.sh
index 32c081f..31174b7 100644
--- a/998-常用脚本/a-Agent-WDD运行/啊-批量命令.sh
+++ b/998-常用脚本/a-Agent-WDD运行/a-批量命令.sh
@@ -15,7 +15,7 @@ cat /usr/local/etc/wdd/agent-wdd-config.yaml
/usr/local/bin/agent-wdd base selinux
/usr/local/bin/agent-wdd base sysconfig
-/usr/local/bin/agent-wdd zsh
+/usr/local/bin/agent-wdd zsh cn
# 首先需要下载所有的依赖!
@@ -56,8 +56,7 @@ done
export server=172.16.100.62
scp /usr/local/bin/agent-wdd root@${server}:/usr/local/bin/agent-wdd
-ssh root@${server} "/usr/local/bin/agent-wdd base ssh config"
-ssh root@${server} "/usr/local/bin/agent-wdd base ssh key"
+ ssh root@${server} "/usr/local/bin/agent-wdd base ssh config && /usr/local/bin/agent-wdd base ssh key"
# 安装docker-compose
@@ -66,12 +65,35 @@ chmod +x /usr/local/bin/docker-compose
# ssh root@${server} "/usr/local/bin/agent-wdd base tools"
+# APT代理加速
+scp /root/wdd/apt-change.sh root@${server}:/root/wdd/apt-change.sh
+ssh root@${server} "bash /root/wdd/apt-change.sh -y"
+
+
+ssh root@${server} "echo \"\"> /etc/apt/apt.conf.d/01proxy"
+ssh root@${server} "printf '%s\n' \
+'Acquire::http::Proxy \"http://10.22.57.8:3142\";' \
+'Acquire::https::Proxy \"http://10.22.57.8:3142\";' \
+| tee /etc/apt/apt.conf.d/01proxy >/dev/null"
+ssh root@${server} "apt-get update"
+ssh root@${server} "apt-get install -y parted"
# 磁盘初始化
ssh root@${server} "mkdir /root/wdd"
scp /root/wdd/disk.sh root@${server}:/root/wdd/
ssh root@${server} "bash /root/wdd/disk.sh"
+# master节点安装docker
+bash /root/wdd/docker.sh
+
+# 在线安装docker 通过APT代理
+scp /etc/apt/keyrings/docker.gpg root@${server}:/root/wdd/
+scp /root/wdd/docker.sh root@${server}:/root/wdd/
+
+ssh root@${server} "bash /root/wdd/docker.sh"
+ssh root@${server} "docker info"
+ssh root@${server} "docker compose version"
+
# 复制文件-docker
scp /root/wdd/docker-amd64-20.10.15.tgz root@${server}:/root/wdd/docker-amd64-20.10.15.tgz
scp /root/wdd/docker-compose-v2.18.0-linux-amd64 root@${server}:/root/wdd/
@@ -81,7 +103,6 @@ ssh root@${server} "/usr/local/bin/agent-wdd info all"
ssh root@${server} "cat /usr/local/etc/wdd/agent-wdd-config.yaml"
-
ssh root@${server} "/usr/local/bin/agent-wdd base swap"
ssh root@${server} "/usr/local/bin/agent-wdd base firewall"
ssh root@${server} "/usr/local/bin/agent-wdd base selinux"
@@ -101,19 +122,34 @@ ssh root@${server} "docker info"
wget https://oss.demo.uavcmlc.com/cmlc-installation/tmp/nginx=1.27.0=2025-03-11=402.tar.gz && docker load < nginx=1.27.0=2025-03-11=402.tar.gz && docker run -it --rm harbor.cdcyy.com.cn/cmii/nginx:1.27.0
+ssh root@${server} "rm /root/wdd/*.sh"
# 主节点执行 安装harbor仓库
/usr/local/bin/agent-wdd base harbor install
# 安装rke kubectl
-mv /root/wdd/rke_amd64 /usr/local/bin/rke
+mv /root/wdd/rke_linux-amd64 /usr/local/bin/rke
chmod +x /usr/local/bin/rke
-mv /root/wdd/kubectl /usr/local/bin/kubectl
+mv /root/wdd/kubectl_v1.30.14_amd64 /usr/local/bin/kubectl
chmod +x /usr/local/bin/kubectl
# 安装 k8s-证书
+mkdir /root/.kube
+cp ./kube_config_cluster.yml /root/.kube/config
-curl -s https://172.29.137.125
\ No newline at end of file
+# 环境测试
+DEFAULT_HTTP_BACKEND_IP=$(kubectl -n ingress-nginx get svc default-http-backend -o jsonpath='{.spec.clusterIP}')
+
+# master节点
+curl -s "http://${DEFAULT_HTTP_BACKEND_IP}"
+
+# worker节点
+ssh root@"$server" "DEFAULT_HTTP_BACKEND_IP='$DEFAULT_HTTP_BACKEND_IP' bash -s" <<'EOF'
+echo "DEFAULT_HTTP_BACKEND_IP=$DEFAULT_HTTP_BACKEND_IP"
+
+curl -s "http://${DEFAULT_HTTP_BACKEND_IP}"
+echo
+EOF
\ No newline at end of file
diff --git a/998-常用脚本/a-Agent-WDD运行/b-联网-docker安装.sh b/998-常用脚本/a-Agent-WDD运行/b-联网-docker安装.sh
deleted file mode 100644
index f05fe68..0000000
--- a/998-常用脚本/a-Agent-WDD运行/b-联网-docker安装.sh
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/bin/bash
-
-set -eo pipefail
-
-# 定义脚本参数
-DOCKER_VERSION="20.10" # 在这里修改期望的版本
-UBUNTU_IDS=("18.04" "20.04" "22.04" "24.04")
-ALIYUN_MIRROR="https://mirrors.aliyun.com"
-DOCKER_COMPOSE_VERSION="2.26.1"
-
-# 1. 检测Ubuntu环境
-check_ubuntu() {
- if ! command -v lsb_release &> /dev/null || [[ $(lsb_release -is) != "Ubuntu" ]]; then
- echo "错误:本脚本仅支持Ubuntu系统"
- exit 1
- fi
-
- local version_id=$(lsb_release -rs)
- if [[ ! " ${UBUNTU_IDS[*]} " =~ " ${version_id} " ]]; then
- echo "错误:不支持的Ubuntu版本 ${version_id},支持版本:${UBUNTU_IDS[*]}"
- exit 1
- fi
-}
-
-# 2. 替换阿里云源
-set_aliyun_mirror() {
- sudo sed -i "s/archive.ubuntu.com/mirrors.aliyun.com/g" /etc/apt/sources.list
- sudo sed -i "s/security.ubuntu.com/mirrors.aliyun.com/g" /etc/apt/sources.list
- sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates
-}
-
-# 3. 准备Docker仓库
-prepare_docker_env() {
- sudo mkdir -p /etc/apt/keyrings
- curl -fsSL $ALIYUN_MIRROR/docker-ce/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
-
- local codename=$(lsb_release -cs)
- echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] $ALIYUN_MIRROR/docker-ce/linux/ubuntu $codename stable" | \
- sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
- sudo apt-get update
-}
-
-# 4. 版本解析优化版本
-get_docker_version() {
- local target_version=""
- if [[ $DOCKER_VERSION =~ ^[0-9]+\.[0-9]+$ ]]; then
- # 提取大版本下最高小版本
- target_version=$(apt-cache madison docker-ce \
- | awk -F'|' '{gsub(/ /,"",$2); print $2}' \
- | grep -E "^[0-9]+:${DOCKER_VERSION}([.-]|\~\w+)" \
- | sort -rV \
- | head -1)
- elif [[ $DOCKER_VERSION =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
- # 精确版本匹配
- target_version=$(apt-cache madison docker-ce \
- | awk -F'|' '{gsub(/ /,"",$2); print $2}' \
- | grep -E "^[0-9]+:${DOCKER_VERSION}.*$(lsb_release -cs)" )
- fi
-
- [ -z "$target_version" ] && echo "错误:找不到Docker版本 $DOCKER_VERSION" && exit 1
- echo "$target_version" | sed 's/^[0-9]+://' # 去除前缀
-}
-
-# 5. 主流程
-main() {
- check_ubuntu
- echo "-- 设置阿里云源 --"
- set_aliyun_mirror
-
- echo "-- 准备Docker仓库 --"
- prepare_docker_env
-
- echo "-- 解析Docker版本 --"
- local full_version=$(get_docker_version)
- echo "选择版本:$full_version"
-
- echo "-- 安装组件 --"
- sudo apt-get install -y \
- docker-ce-cli="$full_version" \
- docker-ce="$full_version" \
- docker-ce-rootless-extras="$full_version" \
- containerd.io \
- docker-buildx-plugin \
- docker-compose-plugin
-
- echo "-- 安装docker-compose --"
- sudo curl -sSL "https://get.daocloud.io/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m`" -o /usr/local/bin/docker-compose
- sudo chmod +x /usr/local/bin/docker-compose
-
- echo "-- 禁用自动更新 --"
- sudo apt-mark hold docker-ce docker-ce-cli containerd.io
-
- echo "-- 启动服务 --"
- sudo systemctl enable docker && sudo systemctl start docker
-
- echo -e "\n=== 安装完成 ==="
- docker --version
- docker-compose --version
-}
-
-main
-
-
-
-
-请写一个shell,基于上述的部分安装逻辑,实现如下的功能
-脚本前面提取变量 docker的版本号 20.10.15 或 20.10(安装小版本最高的版本)
-1. 检测当前主机是否是ubuntu环境,本脚本支支持Ubuntu
-2. 获取本机的版本号,支持ubuntu18.04 20.04 22.04 24.04的版本
-3. 根据ubuntu版本修改,apt的镜像源为阿里源
-4. 在线安装符合变量版本的docker,在线安装docker-compose,安装常用的插件
-5. 禁止docker自动更新
diff --git a/998-常用脚本/a-Agent-WDD运行/b-高级磁盘-disk.sh b/998-常用脚本/a-Agent-WDD运行/b-高级磁盘-disk.sh
new file mode 100644
index 0000000..18eda45
--- /dev/null
+++ b/998-常用脚本/a-Agent-WDD运行/b-高级磁盘-disk.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+set -e
+
+# 用户配置部分
+DISK="/dev/sdb" # 要操作的物理磁盘(请根据实际情况修改)
+MOUNT_PATH="/var/lib/docker" # 挂载点路径(目录会自动创建)
+FS_TYPE="ext4" # 文件系统类型(支持ext4/xfs,默认ext4)
+
+#----------------------------------------------------------
+# 核心逻辑(建议非必要不修改)
+#----------------------------------------------------------
+
+function check_prerequisites() {
+ # 必须root权限运行检查
+ [[ $EUID -ne 0 ]] && echo -e "\033[31m错误:必须使用root权限运行此脚本\033[0m" && exit 1
+
+ # 磁盘存在性检查
+ [[ ! -b "$DISK" ]] && echo -e "\033[31m错误:磁盘 $DISK 不存在\033[0m" && exit 1
+
+ # 文件系统类型校验
+ if [[ "$FS_TYPE" != "ext4" && "$FS_TYPE" != "xfs" ]]; then
+ echo -e "\033[31m错误:不支持的磁盘格式 $FS_TYPE,仅支持 ext4/xfs\033[0m"
+ exit 1
+ fi
+}
+
+function prepare_disk() {
+ local partition="${DISK}1"
+
+ echo -e "\033[34m正在初始化磁盘分区...\033[0m"
+ parted "$DISK" --script mklabel gpt
+ parted "$DISK" --script mkpart primary 0% 100%
+ parted "$DISK" --script set 1 lvm on
+ partprobe "$DISK" # 确保系统识别新分区表
+
+ echo -e "\033[34m正在创建LVM结构...\033[0m"
+ pvcreate "$partition"
+ vgcreate datavg "$partition"
+ lvcreate -y -l 100%FREE -n lvdata datavg
+}
+
+function format_and_mount() {
+ echo -e "\033[34m格式化逻辑卷...\033[0m"
+ if [[ "$FS_TYPE" == "ext4" ]]; then
+ mkfs.ext4 -F "/dev/datavg/lvdata"
+ else
+ mkfs.xfs -f "/dev/datavg/lvdata"
+ fi
+
+ echo -e "\033[34m设置挂载配置...\033[0m"
+ mkdir -p "$MOUNT_PATH"
+ UUID=$(blkid -s UUID -o value "/dev/datavg/lvdata")
+ echo "UUID=$UUID $MOUNT_PATH $FS_TYPE defaults 0 0" | tee -a /etc/fstab >/dev/null
+ mount -a
+}
+
+function verify_result() {
+ echo -e "\n\033[1;36m最终验证结果:\033[0m"
+ lsblk -f "$DISK"
+ echo -e "\n磁盘空间使用情况:"
+ df -hT "$MOUNT_PATH"
+}
+
+# 主执行流程
+check_prerequisites
+prepare_disk
+format_and_mount
+verify_result
+
+echo -e "\n\033[32m操作执行完毕,请仔细核查上述输出信息\033[0m"
+
+
+
+#请写一个shell脚本,脚本前面有变量可以设置 物理磁盘名称 挂载点路径 磁盘格式化的形式,脚本实现如下的功能
+#1.将物理磁盘的盘符修改为gpt格式
+#2.将物理磁盘全部空间创建一个分区,分区格式为lvm
+#3.将分区分配给逻辑卷datavg
+#4.将datavg所有可用的空间分配给逻辑卷lvdata
+#5.将逻辑卷格式化为变量磁盘格式化的形式(支持xfs和ext4的格式,默认为ext4)
+#6.创建变量挂载点路径
+#7.写入/etc/fstab,将逻辑卷挂载到变量挂载点,执行全部挂载操作
+#8.执行lsblk和df -hT查看分区是否正确挂载
+
+
diff --git a/998-常用脚本/a-Agent-WDD运行/c-联网-docker安装.sh b/998-常用脚本/a-Agent-WDD运行/c-联网-docker安装.sh
new file mode 100644
index 0000000..18255bb
--- /dev/null
+++ b/998-常用脚本/a-Agent-WDD运行/c-联网-docker安装.sh
@@ -0,0 +1,594 @@
+#!/usr/bin/env bash
+# ==============================================================================
+# Metadata
+# ==============================================================================
+# Author : Smith Wang (Refactor by ChatGPT)
+# Version : 2.0.0
+# License : MIT
+# Description : Configure Docker APT repository (mirror) and install Docker on
+# Ubuntu (18.04/20.04/22.04/24.04) with robust offline handling.
+#
+# Modules :
+# - Logging & Error Handling
+# - Environment & Dependency Checks
+# - Public Network Reachability Detection
+# - Docker GPG Key Installation (Online/Offline)
+# - Docker APT Repo Configuration
+# - Docker Installation & Service Setup
+#
+# Notes :
+# - This script DOES NOT modify Ubuntu APT sources (/etc/apt/sources.list)
+# - This script DOES NOT set APT proxy (assumed handled elsewhere)
+# - If public network is NOT reachable and local GPG key is missing, script
+# will NOT proceed (per your requirement).
+#
+# ShellCheck : Intended clean for bash v5+ with: shellcheck -x