diff --git a/.idea/workspace.xml b/.idea/workspace.xml
index 2ffcced..8139b9c 100644
--- a/.idea/workspace.xml
+++ b/.idea/workspace.xml
@@ -4,101 +4,92 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
+
+
-
+
+
@@ -108,23 +99,8 @@
-
-
-
{}
{
- "configuredContexts": [
- {
- "name": "hunan-kcs",
- "kubeConfigUrl": "file://C:/Users/wdd/.kube/config",
- "currentNamespace": "Kubernetes.All.Namespaces.Label"
- },
- {
- "name": "cmii-dev-cluster",
- "kubeConfigUrl": "file://C:/Users/wdd/.kube/config",
- "currentNamespace": "uavcloud-devflight"
- }
- ],
"isMigrated": true
}
{
@@ -132,7 +108,7 @@
"associatedIndex": 7
}
-
+
@@ -141,6 +117,7 @@
{
"keyToString": {
+ "KUBERNETES_SUPPRESS_CONFIG_CLUSTER_SUGGESTION": "true",
"RunOnceActivity.ShowReadmeOnStart": "true",
"RunOnceActivity.git.unshallow": "true",
"RunOnceActivity.go.formatter.settings.were.checked": "true",
@@ -148,27 +125,25 @@
"SHARE_PROJECT_CONFIGURATION_FILES": "true",
"git-widget-placeholder": "main",
"go.import.settings.migrated": "true",
- "last_opened_file_path": "C:/Users/wdd/Documents/IdeaProjects/CmiiDeploy/54-202501-深圳规自-ARM/部署yaml",
+ "last_opened_file_path": "C:/Users/wddsh/Documents/IdeaProjects/CmiiDeploy/67-202508-雄安空能院",
"node.js.detected.package.eslint": "true",
"node.js.detected.package.tslint": "true",
"node.js.selected.package.eslint": "(autodetect)",
"node.js.selected.package.tslint": "(autodetect)",
"nodejs_package_manager_path": "npm",
- "settings.editor.selected.configurable": "com.intellij.kubernetes.view.ui.settings.KubernetesViewConfigurable",
+ "settings.editor.selected.configurable": "editor.preferences.tabs",
"vue.rearranger.settings.migration": "true"
}
}
-
-
-
-
-
+
+
+
+
+
-
-
@@ -177,8 +152,7 @@
-
-
+
@@ -199,21 +173,33 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -223,23 +209,15 @@
1742174375760
-
+
- 1744873984891
+ 1744874102820
- 1744873984891
+ 1744874102820
-
-
- 1744873998562
-
-
-
- 1744873998562
-
-
+
@@ -257,16 +235,11 @@
-
-
-
+
+
true
-
-
-
-
\ No newline at end of file
diff --git a/58-202503-新DEMO环境/1-磁盘挂载.sh b/58-202503-新DEMO环境/1-磁盘挂载.sh
new file mode 100644
index 0000000..e69de29
diff --git a/65-202504-湖南二级监管/doris-deploy/doris-be-statusfulset.yaml b/65-202504-湖南二级监管/doris-deploy/doris-be-statusfulset.yaml
index 98faa73..5b56002 100644
--- a/65-202504-湖南二级监管/doris-deploy/doris-be-statusfulset.yaml
+++ b/65-202504-湖南二级监管/doris-deploy/doris-be-statusfulset.yaml
@@ -37,11 +37,9 @@ spec:
defaultMode: 420
- name: be-storage
persistentVolumeClaim:
- # claimName: meta
claimName: doris-be-storage-pvc
- name: be-log
persistentVolumeClaim:
- # claimName: meta
claimName: doris-fe-log-pvc
initContainers:
- name: default-init
@@ -116,11 +114,11 @@ spec:
value: '9030'
resources:
limits:
- cpu: '2'
- memory: 2Gi
+ cpu: '16'
+ memory: 32Gi
requests:
- cpu: '1'
- memory: 1Gi
+ cpu: '8'
+ memory: 32Gi
volumeMounts:
- name: podinfo
mountPath: /etc/podinfo
diff --git a/65-202504-湖南二级监管/doris部署-operator/doris-deplyment.yaml b/65-202504-湖南二级监管/doris部署-operator/doris-deplyment.yaml
index b9c7e87..c4ab138 100644
--- a/65-202504-湖南二级监管/doris部署-operator/doris-deplyment.yaml
+++ b/65-202504-湖南二级监管/doris部署-operator/doris-deplyment.yaml
@@ -1,3 +1,4 @@
+---
apiVersion: doris.selectdb.com/v1
kind: DorisCluster
metadata:
diff --git a/66-202505-浙江二级监管/0-批量脚本.sh b/66-202505-浙江二级监管/0-批量脚本.sh
new file mode 100644
index 0000000..bfcab74
--- /dev/null
+++ b/66-202505-浙江二级监管/0-批量脚本.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+
+host_ip_list=(192.168.10.20 192.168.10.21 192.168.10.22 192.168.10.23 192.168.10.16 192.168.10.17 192.168.10.18 192.168.10.19)
+
+for server in "${host_ip_list[@]}";do
+ echo "server is ${server}"
+
+# ssh -p 2202 root@"$server" "mkdir /root/.ssh && echo \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIgzVwaG6h4al71GhrM2zRmJ8hg7ySelDM0GXUz3SZiF wdd@cmii.com\" >> /root/.ssh/authorized_keys"
+
+ ssh -p 2202 root@"$server" "echo yes !"
+# ssh -p 2202 root@"$server" "systemctl start nfs-client & systemctl start nfs-client & systemctl start nfs-common & systemctl enable nfs-common"
+# ssh -p 2202 root@"$server" "yum install -y chrony"
+# ssh -p 2202 root@"$server" "sed -i \"s/server 10.211.174.206 iburst/server 192.168.10.3 iburst/g\" /etc/chrony.conf"
+# ssh -p 2202 root@"$server" "systemctl restart chronyd && systemctl enable chronyd"
+# ssh -p 2202 root@"$server" "timedatectl && echo "" && chronyc sources"
+# ssh -p 2202 root@"$server" "cp /etc/ssh/sshd_config /etc/ssh/sshd_config_back_wdd"
+# ssh -p 2202 root@"$server" "rm /etc/ssh/sshd_config"
+# scp -P 2202 /etc/ssh/sshd_config root@"$server":/etc/ssh/sshd_config
+
+# ssh -p 2202 root@"$server" "systemctl restart sshd"
+
+# scp -P 2202 /root/yanko/files/docker-19.03.15.tgz root@"$server":/data/
+
+
+# ssh -p 2202 root@"$server" "sudo tar -xzvf /data/docker-19.03.15.tgz -C /usr/bin --strip-components=1"
+# ssh -p 2202 root@"$server" "systemctl restart docker && sleep 3 && docker info"
+
+# scp -P 2202 /root/agent-wdd_linux_amd64 root@"$server":/usr/local/bin/agent-wdd
+# ssh -p 2202 root@"$server" "chmod +x /usr/local/bin/agent-wdd"
+# ssh -p 2202 root@"$server" "/usr/local/bin/agent-wdd base swap"
+# ssh -p 2202 root@"$server" "/usr/local/bin/agent-wdd base firewall"
+# ssh -p 2202 root@"$server" "/usr/local/bin/agent-wdd base selinux"
+# ssh -p 2202 root@"$server" "/usr/local/bin/agent-wdd base sysconfig"
+
+# ssh -p 2202 root@"$server" "docker stop \$(docker ps -aq)"
+# ssh -p 2202 root@"$server" "docker container rm \$(docker ps -aq)"
+
+ ssh -p 2202 root@"$server" "reboot"
+
+
+
+
+done
+
+
+
+
+
+
+
+
+
+
diff --git a/66-202505-浙江二级监管/ai-config.yaml b/66-202505-浙江二级监管/ai-config.yaml
new file mode 100644
index 0000000..beec030
--- /dev/null
+++ b/66-202505-浙江二级监管/ai-config.yaml
@@ -0,0 +1,182 @@
+app:
+ env: default
+ port: 2333
+ log:
+ level: DEBUG
+ node:
+ cluster:
+ enable: false
+ capacity:
+ cpu: 8
+ id: "auto"
+ ip: "auto"
+ redis:
+ host: 192.168.10.3
+ port: 36379
+ database: 6
+ password: Mcache@4522
+ rabbitmq:
+ host: 192.168.10.3
+ port: 35672
+ username: admin
+ password: nYcRN91r._hj
+ mqtt:
+ host: 192.168.10.3
+ port: 32883
+ username: cmlc
+ password: odD8#Ve7.B
+
+ai_models:
+ # Then remember to synchronously updated the configuration here
+ # to ModelStore core:tasking:store:ModelStore
+ local:
+ drone:
+ enable: true
+ type: yolov8
+ path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/drone-20241223-t4.rt"
+ classes: "drone, bird"
+ sea:
+ enable: true
+ type: yolov8
+ path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/sea.engine"
+ classes: "person, boat"
+ people_vehicle:
+ enable: true
+ type: yolov8
+ path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/people_vehicle-t4-20240410.rt"
+ classes: "others, people, crowd, motor, car, truck, bus, non-motor vehicle"
+ vehicle:
+ enable: true
+ type: yolov8
+ path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/vehicle-20240328-t4.rt"
+ classes: "others, people, crowd, motor, car, truck, bus, non-motor vehicle"
+ inf_person:
+ enable: true
+ type: yolov8
+ path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/inf_person-20241129-t4.rt"
+ classes: "person"
+ ship:
+ enable: true
+ type: yolov8
+ path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/ship-20240306-t4.rt"
+ classes: "ship"
+ ship_with_flag:
+ enable: true
+ type: yolov8
+ path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/ship-20240306-t4.rt"
+ classes: "ship, flag"
+ drowning:
+ enable: true
+ type: yolov8
+ path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/drowning-20240222-t4.rt"
+ classes: "drowner"
+ dino:
+ enable: false
+ type: dino
+ path: "/cmii/cmlc-project-ai-streaming-engine/models/dino/ground.engine"
+ tokenizer: "bert-base-uncased"
+ fake:
+ # Do nothing. For some tasks not need ai processing such as adding text.
+ enable: true
+ type: fake
+
+
+task:
+ plain:
+ usage:
+ cpu: 2
+ laad:
+ usage:
+ cpu: 2
+ mq:
+ detail:
+ topic: "event.ai.photoelectricity.warn.detail"
+ exchange: "event.ai.photoelectricity.warn.detail"
+ briefly:
+ topic: "event.ai.photoelectricity.warn.briefly"
+ exchange: "event.ai.photoelectricity.warn.briefly"
+ count:
+ usage:
+ cpu: 2
+ mq:
+ topic: "aiVideo"
+ exchange: "aiVideo"
+ accumulation:
+ usage:
+ cpu: 2
+ mq:
+ topic: "aiVideo"
+ exchange: "aiVideo"
+ text:
+ usage:
+ cpu: 2
+
+module:
+ shm:
+ ring_size: 20
+ max_w: 2600
+ max_h: 1500
+ max_dets: 256
+ smot:
+ alive: 1
+ tolerance: 256
+ drop: 192
+ hits: 2
+ ffio:
+ gpu:
+ enable: true
+ track:
+ type: bytetrack
+ bytetrack:
+ fps: 30
+ draw:
+ colors:
+ default: [ 0, 255, 0 ]
+ drone: [ 229, 57, 57 ]
+ bird: [ 97, 237, 38 ]
+ motor: [ 92, 184, 255 ]
+ car: [ 67, 144, 219 ]
+ truck: [ 41, 115, 204 ]
+ bus: [ 36, 93, 179 ]
+ person: [ 255, 200, 51 ]
+ people: [ 255, 200, 51 ]
+ drowner: [ 0, 127, 245 ]
+ ship: [ 102, 236, 204 ]
+ region: [60, 110, 156]
+ crossline: [60, 110, 156]
+ text:
+ padding: 4
+ skip_threshold: 20
+
+cmlc:
+ mapper:
+ "111":
+ task: count
+ model: vehicle
+ "114":
+ task: count
+ model: vehicle
+ "115":
+ task: accumulation
+ model: vehicle
+ "112":
+ task: count
+ model: inf_person
+ "113":
+ task: count
+ model: drowning
+ "121":
+ task: laad
+ model: drone
+ "122":
+ task: count
+ model: drone
+ "131":
+ task: count
+ model: ship
+ "201":
+ task: text
+ model: fake
+
+debug:
+ enable: true
\ No newline at end of file
diff --git a/66-202505-浙江二级监管/cluster.yaml b/66-202505-浙江二级监管/cluster.yaml
new file mode 100644
index 0000000..61d75b4
--- /dev/null
+++ b/66-202505-浙江二级监管/cluster.yaml
@@ -0,0 +1,324 @@
+nodes:
+ - address: 192.168.10.3
+ user: root
+ port: 2202
+ role:
+ - controlplane
+ - etcd
+ - worker
+ internal_address: 192.168.10.3
+ labels:
+ ingress-deploy: true
+ - address: 192.168.10.4
+ user: root
+ port: 2202
+ role:
+ - worker
+ internal_address: 192.168.10.4
+ labels:
+ ingress-deploy: true
+ mysql-deploy: true
+ uavcloud.env: zjyd
+ - address: 192.168.10.5
+ user: root
+ port: 2202
+ role:
+ - worker
+ internal_address: 192.168.10.5
+ labels:
+ ingress-deploy: true
+ uavcloud.env: zjyd
+ - address: 192.168.10.6
+ user: root
+ port: 2202
+ role:
+ - worker
+ internal_address: 192.168.10.6
+ labels:
+ ingress-deploy: true
+ uavcloud.env: zjyd
+ - address: 192.168.10.2
+ user: root
+ port: 2202
+ role:
+ - worker
+ internal_address: 192.168.10.2
+ labels:
+ mongo.node: master
+ - address: 192.168.10.8
+ user: root
+ port: 2202
+ role:
+ - worker
+ internal_address: 192.168.10.8
+ labels:
+ uavcloud.env: zjyd
+ - address: 192.168.10.9
+ user: root
+ port: 2202
+ role:
+ - worker
+ internal_address: 192.168.10.9
+ labels:
+ redis.node: master
+ - address: 192.168.10.20
+ user: root
+ port: 2202
+ role:
+ - worker
+ internal_address: 192.168.10.20
+ labels:
+ uavcloud.env: zjyd
+ - address: 192.168.10.21
+ user: root
+ port: 2202
+ role:
+ - worker
+ internal_address: 192.168.10.21
+ labels:
+ uavcloud.env: zjyd
+ - address: 192.168.10.22
+ user: root
+ port: 2202
+ role:
+ - worker
+ internal_address: 192.168.10.22
+ labels:
+ uavcloud.env: zjyd
+ - address: 192.168.10.23
+ user: root
+ port: 2202
+ role:
+ - worker
+ internal_address: 192.168.10.23
+ labels:
+ uavcloud.env: zjyd
+ - address: 192.168.10.16
+ user: root
+ port: 2202
+ role:
+ - worker
+ internal_address: 192.168.10.16
+ labels:
+ doris.cluster: "true"
+ - address: 192.168.10.17
+ user: root
+ port: 2202
+ role:
+ - worker
+ internal_address: 192.168.10.17
+ labels:
+ doris.cluster: "true"
+ - address: 192.168.10.18
+ user: root
+ port: 2202
+ role:
+ - worker
+ internal_address: 192.168.10.18
+ labels:
+ doris.cluster: "true"
+ - address: 192.168.10.19
+ user: root
+ port: 2202
+ role:
+ - worker
+ internal_address: 192.168.10.19
+ labels:
+ doris.cluster: "true"
+
+
+
+
+authentication:
+ strategy: x509
+ sans:
+ - "192.168.10.3"
+
+private_registries:
+ - url: 192.168.10.3:8033 # 私有镜像库地址
+ user: admin
+ password: "V2ryStr@ngPss"
+ is_default: true
+
+##############################################################################
+
+# 默认值为false,如果设置为true,当发现不支持的Docker版本时,RKE不会报错
+ignore_docker_version: true
+
+# Set the name of the Kubernetes cluster
+cluster_name: rke-cluster
+
+kubernetes_version: v1.20.4-rancher1-1
+
+ssh_key_path: /root/.ssh/id_ed25519
+
+# Enable running cri-dockerd
+# Up to Kubernetes 1.23, kubelet contained code called dockershim
+# to support Docker runtime. The replacement is called cri-dockerd
+# and should be enabled if you want to keep using Docker as your
+# container runtime
+# Only available to enable in Kubernetes 1.21 and higher
+enable_cri_dockerd: true
+
+
+services:
+ etcd:
+ backup_config:
+ enabled: false
+ interval_hours: 72
+ retention: 3
+ safe_timestamp: false
+ timeout: 300
+ creation: 12h
+ extra_args:
+ election-timeout: 5000
+ heartbeat-interval: 500
+ gid: 0
+ retention: 72h
+ snapshot: false
+ uid: 0
+
+ kube-api:
+ # IP range for any services created on Kubernetes
+ # This must match the service_cluster_ip_range in kube-controller
+ service_cluster_ip_range: 172.29.0.0/16
+ # Expose a different port range for NodePort services
+ service_node_port_range: 30000-40000
+ always_pull_images: true
+ pod_security_policy: false
+ # Add additional arguments to the kubernetes API server
+ # This WILL OVERRIDE any existing defaults
+ extra_args:
+ # Enable audit log to stdout
+ audit-log-path: "-"
+ # Increase number of delete workers
+ delete-collection-workers: 3
+ # Set the level of log output to warning-level
+ v: 0
+ # Using the EventRateLimit admission control enforces a limit on the number of events
+ # that the API Server will accept in a given time period
+ # Available as of v1.0.0
+ event_rate_limit:
+ enabled: false
+ configuration:
+ apiVersion: eventratelimit.admission.k8s.io/v1alpha1
+ kind: Configuration
+ limits:
+ - type: Server
+ qps: 6000
+ burst: 30000
+ kube-controller:
+ # CIDR pool used to assign IP addresses to pods in the cluster
+ cluster_cidr: 172.28.0.0/16
+ # IP range for any services created on Kubernetes
+ # This must match the service_cluster_ip_range in kube-api
+ service_cluster_ip_range: 172.29.0.0/16
+ # Add additional arguments to the kubernetes API server
+ # This WILL OVERRIDE any existing defaults
+ extra_args:
+ # Set the level of log output to debug-level
+ v: 1
+ # Enable RotateKubeletServerCertificate feature gate
+ feature-gates: RotateKubeletServerCertificate=true
+ # Enable TLS Certificates management
+ # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
+ cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
+ cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
+ kubelet:
+ # Base domain for the cluster
+ cluster_domain: cluster.local
+ # IP address for the DNS service endpoint
+ cluster_dns_server: 172.29.0.10
+ # Fail if swap is on
+ fail_swap_on: false
+ # Set max pods to 250 instead of default 110
+ extra_binds:
+ - "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
+ extra_args:
+ max-pods: 162
+ # Optionally define additional volume binds to a service
+ scheduler:
+ extra_args:
+ # Set the level of log output to warning-level
+ v: 0
+ kubeproxy:
+ extra_args:
+ # Set the level of log output to warning-level
+ v: 0
+
+authorization:
+ mode: rbac
+
+addon_job_timeout: 30
+
+network:
+ options:
+ flannel_backend_type: host-gw
+ flannel_iface: ens192
+ flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
+ flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
+ plugin: flannel
+
+# Specify network plugin-in (canal, calico, flannel, weave, or none)
+# network:
+# mtu: 1440
+# options:
+# flannel_backend_type: vxlan
+# plugin: calico
+# tolerations:
+# - key: "node.kubernetes.io/unreachable"
+# operator: "Exists"
+# effect: "NoExecute"
+# tolerationseconds: 300
+# - key: "node.kubernetes.io/not-ready"
+# operator: "Exists"
+# effect: "NoExecute"
+# tolerationseconds: 300
+
+# Specify DNS provider (coredns or kube-dns)
+dns:
+ provider: coredns
+ nodelocal:
+ # Available as of v1.1.0
+ update_strategy:
+ strategy: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 20%
+ maxSurge: 15%
+ linear_autoscaler_params:
+ cores_per_replica: 0.34
+ nodes_per_replica: 4
+ prevent_single_point_failure: true
+ min: 2
+ max: 3
+ tolerations:
+ - key: "node.kubernetes.io/unreachable"
+ operator: "Exists"
+ effect: "NoExecute"
+ tolerationseconds: 300
+ - key: "node.kubernetes.io/not-ready"
+ operator: "Exists"
+ effect: "NoExecute"
+ tolerationseconds: 300
+
+# Specify monitoring provider (metrics-server)
+monitoring:
+ provider: metrics-server
+ # Available as of v1.1.0
+ update_strategy:
+ strategy: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 8
+
+ingress:
+ provider: nginx
+ default_backend: true
+ http_port: 0
+ https_port: 0
+ extra_envs:
+ - name: TZ
+ value: Asia/Shanghai
+ node_selector:
+ ingress-deploy: true
+ options:
+ use-forwarded-headers: "true"
\ No newline at end of file
diff --git a/66-202505-浙江二级监管/doris-部署/0-节点lable.sh b/66-202505-浙江二级监管/doris-部署/0-节点lable.sh
new file mode 100644
index 0000000..bff32f4
--- /dev/null
+++ b/66-202505-浙江二级监管/doris-部署/0-节点lable.sh
@@ -0,0 +1,26 @@
+
+
+
+kubectl label nodes 192.168.10.17 192.168.10.18 192.168.10.19 doris-be-node=true
+
+
+# 2. 在每台节点创建存储目录
+for node in 192.168.10.17 192.168.10.18 192.168.10.19; do
+ ssh -p 2202 root@"$node" "sudo mkdir -p /data/doris-be/storage && sudo chmod 777 /data/doris-be"
+ ssh -p 2202 root@"$node" "ls /data/doris-be/"
+done
+
+kubectl label nodes 192.168.10.16 doris-fe-node=true
+for node in 192.168.10.16; do
+ ssh -p 2202 root@"$node" "sudo mkdir -p /data/doris-fe/storage && sudo chmod 777 /data/doris-fe"
+  ssh -p 2202 root@"$node" "ls /data/doris-fe/"
+done
+
+# uas的业务 保证防重复部署 只能在这几台
+kubectl label nodes 192.168.10.20 192.168.10.21 192.168.10.22 192.168.10.23 uavcloud.env=zjejpt-uas
+
+# rabbitmq需要固定到特定的节点
+
+kubectl label nodes 192.168.10.8 rabbitmq.node=master
+kubectl label nodes 192.168.10.8 emqx.node=master
+kubectl label nodes 192.168.10.9 redis.node=master
diff --git a/66-202505-浙江二级监管/doris-部署/doris-all-service.yaml b/66-202505-浙江二级监管/doris-部署/doris-all-service.yaml
new file mode 100644
index 0000000..4fbd831
--- /dev/null
+++ b/66-202505-浙江二级监管/doris-部署/doris-all-service.yaml
@@ -0,0 +1,102 @@
+kind: Service
+apiVersion: v1
+metadata:
+ namespace: zjejpt-uas
+ name: doris-cluster-be-internal
+ labels:
+ app.kubernetes.io/component: doris-cluster-be-internal
+spec:
+ ports:
+ - name: heartbeat-port
+ protocol: TCP
+ port: 9050
+ targetPort: 9050
+ selector:
+ app.kubernetes.io/component: doris-cluster-be
+ clusterIP: None
+ type: ClusterIP
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: doris-cluster-be-service
+ namespace: zjejpt-uas
+ labels:
+ app.kubernetes.io/component: doris-cluster-be
+spec:
+ ports:
+ - name: be-port
+ protocol: TCP
+ port: 9060
+ targetPort: 9060
+ nodePort: 32189
+ - name: webserver-port
+ protocol: TCP
+ port: 8040
+ targetPort: 8040
+ nodePort: 31624
+ - name: heartbeat-port
+ protocol: TCP
+ port: 9050
+ targetPort: 9050
+ nodePort: 31625
+ - name: brpc-port
+ protocol: TCP
+ port: 8060
+ targetPort: 8060
+ nodePort: 31627
+ selector:
+ app.kubernetes.io/component: doris-cluster-be
+ type: NodePort
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: doris-cluster-fe-internal
+ namespace: zjejpt-uas
+ labels:
+ app.kubernetes.io/component: doris-cluster-fe
+spec:
+ ports:
+ - name: query-port
+ protocol: TCP
+ port: 9030
+ targetPort: 9030
+ selector:
+ app.kubernetes.io/component: doris-cluster-fe
+ clusterIP: None
+ type: ClusterIP
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: doris-cluster-fe-service
+ namespace: zjejpt-uas
+ labels:
+ app.kubernetes.io/component: doris-cluster-fe
+spec:
+ ports:
+ - name: http-port
+ protocol: TCP
+ port: 8030
+ targetPort: 8030
+ nodePort: 31620
+ - name: rpc-port
+ protocol: TCP
+ port: 9020
+ targetPort: 9020
+ nodePort: 31621
+ - name: query-port
+ protocol: TCP
+ port: 9030
+ targetPort: 9030
+ nodePort: 31622
+ - name: edit-log-port
+ protocol: TCP
+ port: 9010
+ targetPort: 9010
+ nodePort: 31623
+ selector:
+ app.kubernetes.io/component: doris-cluster-fe
+ type: NodePort
+---
\ No newline at end of file
diff --git a/66-202505-浙江二级监管/doris-部署/doris-be-configmap.yaml b/66-202505-浙江二级监管/doris-部署/doris-be-configmap.yaml
new file mode 100644
index 0000000..e12fb95
--- /dev/null
+++ b/66-202505-浙江二级监管/doris-部署/doris-be-configmap.yaml
@@ -0,0 +1,82 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: doris-cluster-be-conf
+  namespace: zjejpt-uas
+ labels:
+ app.kubernetes.io/component: be
+data:
+ be.conf: >
+ CUR_DATE=`date +%Y%m%d-%H%M%S`
+
+ # Log dir
+ LOG_DIR="${DORIS_HOME}/log/"
+
+ # For jdk 8
+ JAVA_OPTS="-Dfile.encoding=UTF-8 -Xmx2048m -DlogPath=$LOG_DIR/jni.log -Xloggc:$LOG_DIR/be.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.security.krb5.debug=true -Dsun.java.command=DorisBE -XX:-CriticalJNINatives"
+
+ # Set your own JAVA_HOME
+ # JAVA_HOME=/path/to/jdk/
+
+ # https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
+ # https://jemalloc.net/jemalloc.3.html jemalloc 内存分配器设置参数
+ JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:15000,dirty_decay_ms:15000,oversize_threshold:0,prof:false,lg_prof_interval:32,lg_prof_sample:19,prof_gdump:false,prof_accum:false,prof_leak:false,prof_final:false"
+ JEMALLOC_PROF_PRFIX=""
+
+ # ports for admin, web, heartbeat service
+ be_port = 9060
+ webserver_port = 8040
+ heartbeat_service_port = 9050
+ brpc_port = 8060
+ arrow_flight_sql_port = -1
+
+ # HTTPS configures
+ enable_https = false
+ # path of certificate in PEM format.
+ #ssl_certificate_path = "$DORIS_HOME/conf/cert.pem"
+ # path of private key in PEM format.
+ #ssl_private_key_path = "$DORIS_HOME/conf/key.pem"
+
+ # Choose one if there are more than one ip except loopback address.
+ # Note that there should at most one ip match this list.
+ # If no ip match this rule, will choose one randomly.
+ # use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
+ # Default value is empty.
+ # priority_networks = 10.10.10.0/24;192.168.0.0/16
+
+ # data root path, separate by ';'
+ # You can specify the storage type for each root path, HDD (cold data) or SSD (hot data)
+ # eg:
+ # storage_root_path = /home/disk1/doris;/home/disk2/doris;/home/disk2/doris
+ # storage_root_path = /home/disk1/doris,medium:SSD;/home/disk2/doris,medium:SSD;/home/disk2/doris,medium:HDD
+ # /home/disk2/doris,medium:HDD(default)
+ #
+ # you also can specify the properties by setting ':', separate by ','
+ # property 'medium' has a higher priority than the extension of path
+ #
+ # Default value is ${DORIS_HOME}/storage, you should create it by hand.
+ # storage_root_path = ${DORIS_HOME}/storage
+
+ # Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
+ # jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
+
+ # Advanced configurations
+ # INFO, WARNING, ERROR, FATAL
+ sys_log_level = INFO
+ # sys_log_roll_mode = SIZE-MB-1024
+ # sys_log_roll_num = 10
+ # sys_log_verbose_modules = *
+ # log_buffer_level = -1
+
+ # aws sdk log level
+ # Off = 0,
+ # Fatal = 1,
+ # Error = 2,
+ # Warn = 3,
+ # Info = 4,
+ # Debug = 5,
+ # Trace = 6
+ # Default to turn off aws sdk log, because aws sdk errors that need to be cared will be output through Doris logs
+ #aws_log_level=0
+ ## If you are not running in aws cloud, you can disable EC2 metadata
+ #AWS_EC2_METADATA_DISABLED=false
\ No newline at end of file
diff --git a/66-202505-浙江二级监管/doris-部署/doris-be-statefulset.yaml b/66-202505-浙江二级监管/doris-部署/doris-be-statefulset.yaml
new file mode 100644
index 0000000..219e169
--- /dev/null
+++ b/66-202505-浙江二级监管/doris-部署/doris-be-statefulset.yaml
@@ -0,0 +1,208 @@
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: doris-cluster-be
+ namespace: zjejpt-uas
+ labels:
+ app.kubernetes.io/component: doris-cluster-be
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: doris-cluster-be
+ template:
+ metadata:
+ name: doris-cluster-be
+ labels:
+ app.kubernetes.io/component: doris-cluster-be
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - "192.168.10.17"
+ - "192.168.10.18"
+ - "192.168.10.19"
+ - key: doris-be-node
+ operator: Exists
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/component
+ operator: In
+ values: [ "doris-cluster-be" ]
+ topologyKey: "kubernetes.io/hostname"
+ volumes:
+ - name: be-local-storage
+ hostPath:
+ path: /data/doris-be/storage
+ type: DirectoryOrCreate
+ - name: podinfo
+ downwardAPI:
+ items:
+ - path: labels
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.labels
+ - path: annotations
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.annotations
+ defaultMode: 420
+ initContainers:
+ - name: pod-ordinal-init
+ image: 192.168.10.3:8033/cmii/alpine:1.0.0
+ command: [ 'sh', '-c' ]
+ args:
+ - |
+ # 获取Pod序号
+ POD_ORDINAL=$(echo ${POD_NAME} | awk -F- '{print $NF}')
+
+ # 通过节点名称匹配序号
+ case ${NODE_NAME} in
+ "192.168.10.17") ORDINAL=0 ;;
+ "192.168.10.18") ORDINAL=1 ;;
+ "192.168.10.19") ORDINAL=2 ;;
+ esac
+
+ # 验证序号匹配
+ if [ "$POD_ORDINAL" != "$ORDINAL" ]; then
+ echo "ERROR: Pod ordinal ${POD_ORDINAL} not match node ${NODE_NAME}"
+ exit 1
+ fi
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: default-init
+ image: '192.168.10.3:8033/cmii/alpine:1.0.0'
+ command:
+ - /bin/sh
+ args:
+ - '-c'
+ - sysctl -w vm.max_map_count=2000000 && swapoff -a
+ resources:
+ limits:
+ cpu: '2'
+ memory: 2Gi
+ requests:
+ cpu: '1'
+ memory: 1Gi
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ containers:
+ - name: be
+ image: '192.168.10.3:8033/cmii/doris.be-amd64:2.1.6'
+ command:
+ - /opt/apache-doris/be_entrypoint.sh
+ args:
+ - $(ENV_FE_ADDR)
+ ports:
+ - name: be-port
+ containerPort: 9060
+ protocol: TCP
+ - name: webserver-port
+ containerPort: 8040
+ protocol: TCP
+ - name: heartbeat-port
+ containerPort: 9050
+ protocol: TCP
+ - name: brpc-port
+ containerPort: 8060
+ protocol: TCP
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: CONFIGMAP_MOUNT_PATH
+ value: /etc/doris
+ - name: USER
+ value: root
+ - name: DORIS_ROOT
+ value: /opt/apache-doris
+ - name: ENV_FE_ADDR
+ value: doris-cluster-fe-service
+ - name: FE_QUERY_PORT
+ value: '9030'
+ resources:
+ limits:
+ cpu: '16'
+ memory: 32Gi
+ requests:
+ cpu: '8'
+ memory: 32Gi
+ volumeMounts:
+ - name: be-local-storage
+ mountPath: /opt/apache-doris/be/storage
+ - name: be-local-storage
+ mountPath: /opt/apache-doris/be/log
+ livenessProbe:
+ tcpSocket:
+ port: 9050
+ initialDelaySeconds: 80
+ timeoutSeconds: 180
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /api/health
+ port: 8040
+ scheme: HTTP
+ timeoutSeconds: 1
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ tcpSocket:
+ port: 9050
+ timeoutSeconds: 1
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 60
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /opt/apache-doris/be_prestop.sh
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: IfNotPresent
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ serviceName: doris-cluster-be-internal
+ podManagementPolicy: Parallel
diff --git a/66-202505-浙江二级监管/doris-部署/doris-be-statusfulset-localpv-failed.yaml b/66-202505-浙江二级监管/doris-部署/doris-be-statusfulset-localpv-failed.yaml
new file mode 100644
index 0000000..a64f119
--- /dev/null
+++ b/66-202505-浙江二级监管/doris-部署/doris-be-statusfulset-localpv-failed.yaml
@@ -0,0 +1,188 @@
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: doris-cluster-be
+ namespace: zjejpt-uas
+ labels:
+ app.kubernetes.io/component: doris-cluster-be
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: doris-cluster-be
+ template:
+ metadata:
+ name: doris-cluster-be
+ labels:
+ app.kubernetes.io/component: doris-cluster-be
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ volumes:
+ - name: podinfo
+ downwardAPI:
+ items:
+ - path: labels
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.labels
+ - path: annotations
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.annotations
+ defaultMode: 420
+ - name: doris-cluster-be-conf
+ configMap:
+ name: doris-cluster-be-conf
+ defaultMode: 420
+ initContainers:
+ - name: default-init
+ image: '192.168.10.3:8033/cmii/alpine:1.0.0'
+ command:
+ - /bin/sh
+ args:
+ - '-c'
+ - sysctl -w vm.max_map_count=2000000 && swapoff -a
+ resources:
+ limits:
+ cpu: '2'
+ memory: 2Gi
+ requests:
+ cpu: '1'
+ memory: 1Gi
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ containers:
+ - name: be
+ image: '192.168.10.3:8033/cmii/doris.be-amd64:2.1.6'
+ command:
+ - /opt/apache-doris/be_entrypoint.sh
+ args:
+ - $(ENV_FE_ADDR)
+ ports:
+ - name: be-port
+ containerPort: 9060
+ protocol: TCP
+ - name: webserver-port
+ containerPort: 8040
+ protocol: TCP
+ - name: heartbeat-port
+ containerPort: 9050
+ protocol: TCP
+ - name: brpc-port
+ containerPort: 8060
+ protocol: TCP
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: CONFIGMAP_MOUNT_PATH
+ value: /etc/doris
+ - name: USER
+ value: root
+ - name: DORIS_ROOT
+ value: /opt/apache-doris
+ - name: ENV_FE_ADDR
+ value: doris-cluster-fe-service
+ - name: FE_QUERY_PORT
+ value: '9030'
+ resources:
+ limits:
+ cpu: '16'
+ memory: 32Gi
+ requests:
+ cpu: '8'
+ memory: 32Gi
+ volumeMounts:
+ - name: podinfo
+ mountPath: /etc/podinfo
+ - name: be-storage
+ mountPath: /opt/apache-doris/be/storage
+ - name: be-storage
+ mountPath: /opt/apache-doris/be/log
+ - name: doris-cluster-be-conf
+ mountPath: /etc/doris
+ livenessProbe:
+ tcpSocket:
+ port: 9050
+ initialDelaySeconds: 80
+ timeoutSeconds: 180
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /api/health
+ port: 8040
+ scheme: HTTP
+ timeoutSeconds: 1
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ tcpSocket:
+ port: 9050
+ timeoutSeconds: 1
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 60
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /opt/apache-doris/be_prestop.sh
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: IfNotPresent
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/component
+ operator: In
+ values: [ "doris-cluster-be" ]
+ topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: doris-be-node
+ operator: In
+ values: [ "true" ]
+ schedulerName: default-scheduler
+ volumeClaimTemplates:
+ - metadata:
+ name: be-storage
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ storageClassName: "local-storage"
+ resources:
+ requests:
+ storage: 1500Gi
+ serviceName: doris-cluster-be-internal
+ podManagementPolicy: Parallel
\ No newline at end of file
diff --git a/66-202505-浙江二级监管/doris-部署/doris-fe-configmap.yaml b/66-202505-浙江二级监管/doris-部署/doris-fe-configmap.yaml
new file mode 100644
index 0000000..ee91c0c
--- /dev/null
+++ b/66-202505-浙江二级监管/doris-部署/doris-fe-configmap.yaml
@@ -0,0 +1,67 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: doris-cluster-fe-conf
+ namespace: zjejpt-uas
+ labels:
+ app.kubernetes.io/component: fe
+data:
+ fe.conf: |
+ #####################################################################
+ ## The uppercase properties are read and exported by bin/start_fe.sh.
+ ## To see all Frontend configurations,
+ ## see fe/src/org/apache/doris/common/Config.java
+ #####################################################################
+
+ CUR_DATE=`date +%Y%m%d-%H%M%S`
+
+ # Log dir
+ LOG_DIR = ${DORIS_HOME}/log
+
+ # For jdk 8
+ JAVA_OPTS="-Dfile.encoding=UTF-8 -Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:$LOG_DIR/log/fe.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Dlog4j2.formatMsgNoLookups=true"
+
+ # Set your own JAVA_HOME
+ # JAVA_HOME=/path/to/jdk/
+
+ ##
+ ## the lowercase properties are read by main program.
+ ##
+
+ # store metadata, must be created before start FE.
+ # Default value is ${DORIS_HOME}/doris-meta
+ # meta_dir = ${DORIS_HOME}/doris-meta
+
+ # Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
+ # jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
+
+ http_port = 8030
+ rpc_port = 9020
+ query_port = 9030
+ edit_log_port = 9010
+ arrow_flight_sql_port = -1
+
+ # Choose one if there are more than one ip except loopback address.
+ # Note that there should at most one ip match this list.
+ # If no ip match this rule, will choose one randomly.
+ # use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
+ # Default value is empty.
+ # priority_networks = 10.10.10.0/24;192.168.0.0/16
+
+ # Advanced configurations
+ # log_roll_size_mb = 1024
+ # INFO, WARN, ERROR, FATAL
+ sys_log_level = INFO
+ # NORMAL, BRIEF, ASYNC,FE 日志的输出模式,其中 NORMAL 为默认的输出模式,日志同步输出且包含位置信息。ASYNC 默认是日志异步输出且包含位置信息。 BRIEF 模式是日志异步输出但不包含位置信息。三种日志输出模式的性能依次递增
+ sys_log_mode = ASYNC
+ # sys_log_roll_num = 10
+ # sys_log_verbose_modules = org.apache.doris
+ # audit_log_dir = $LOG_DIR
+ # audit_log_modules = slow_query, query
+ # audit_log_roll_num = 10
+ # meta_delay_toleration_second = 10
+ # qe_max_connection = 1024
+ # qe_query_timeout_second = 300
+ # qe_slow_log_ms = 5000
+ #Fully Qualified Domain Name,完全限定域名,开启后各节点之间通信基于FQDN
+ enable_fqdn_mode = true
\ No newline at end of file
diff --git a/66-202505-浙江二级监管/doris-部署/doris-fe-statusfulset.yaml b/66-202505-浙江二级监管/doris-部署/doris-fe-statusfulset.yaml
new file mode 100644
index 0000000..00e37ad
--- /dev/null
+++ b/66-202505-浙江二级监管/doris-部署/doris-fe-statusfulset.yaml
@@ -0,0 +1,160 @@
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: doris-cluster-fe
+ namespace: zjejpt-uas
+ labels:
+ app.kubernetes.io/component: doris-cluster-fe
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: doris-cluster-fe
+ template:
+ metadata:
+ name: doris-cluster-fe
+ labels:
+ app.kubernetes.io/component: doris-cluster-fe
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ volumes:
+ - name: fe-local-storage
+ hostPath:
+ path: /data/doris-fe/storage
+ type: DirectoryOrCreate
+ - name: podinfo
+ downwardAPI:
+ items:
+ - path: labels
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.labels
+ - path: annotations
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.annotations
+ defaultMode: 420
+ - name: doris-cluster-fe-conf
+ configMap:
+ name: doris-cluster-fe-conf
+ defaultMode: 420
+ containers:
+ - name: doris-cluster-fe
+ image: '192.168.10.3:8033/cmii/doris.fe-amd64:2.1.6'
+ command:
+ - /opt/apache-doris/fe_entrypoint.sh
+ args:
+ - $(ENV_FE_ADDR)
+ ports:
+ - name: http-port
+ containerPort: 8030
+ protocol: TCP
+ - name: rpc-port
+ containerPort: 9020
+ protocol: TCP
+ - name: query-port
+ containerPort: 9030
+ protocol: TCP
+ - name: edit-log-port
+ containerPort: 9010
+ protocol: TCP
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: CONFIGMAP_MOUNT_PATH
+ value: /etc/doris
+ - name: USER
+ value: root
+ - name: DORIS_ROOT
+ value: /opt/apache-doris
+ - name: ENV_FE_ADDR
+ value: doris-cluster-fe-service
+ - name: FE_QUERY_PORT
+ value: '9030'
+ - name: ELECT_NUMBER
+ value: '3'
+ resources:
+ limits:
+ cpu: '16'
+ memory: 32Gi
+ requests:
+ cpu: '8'
+ memory: 32Gi
+ volumeMounts:
+ - name: podinfo
+ mountPath: /etc/podinfo
+ - name: fe-local-storage
+ mountPath: /opt/apache-doris/fe/log
+ - name: fe-local-storage
+ mountPath: /opt/apache-doris/fe/doris-meta
+ - name: doris-cluster-fe-conf
+ mountPath: /etc/doris
+ livenessProbe:
+ tcpSocket:
+ port: 9030
+ initialDelaySeconds: 80
+ timeoutSeconds: 180
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /api/health
+ port: 8030
+ scheme: HTTP
+ timeoutSeconds: 1
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ tcpSocket:
+ port: 9030
+ timeoutSeconds: 1
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 60
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /opt/apache-doris/fe_prestop.sh
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: IfNotPresent
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - "192.168.10.16"
+ - key: doris-fe-node
+ operator: Exists
+ schedulerName: default-scheduler
+ serviceName: doris-cluster-fe-internal
+ podManagementPolicy: Parallel
\ No newline at end of file
diff --git a/66-202505-浙江二级监管/doris-部署/doris-local-pv.yaml b/66-202505-浙江二级监管/doris-部署/doris-local-pv.yaml
new file mode 100644
index 0000000..a722df7
--- /dev/null
+++ b/66-202505-浙江二级监管/doris-部署/doris-local-pv.yaml
@@ -0,0 +1,79 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: local-storage
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: WaitForFirstConsumer
+reclaimPolicy: Retain
+allowedTopologies:
+ - matchLabelExpressions:
+ - key: doris-be-node
+ values: ["true"]
+
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: doris-be-pv-node1
+spec:
+ capacity:
+ storage: 1500Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: local-storage
+ local:
+ path: /data/doris-be/storage
+ nodeAffinity:
+ required:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values: ["192.168.10.17"]
+
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: doris-be-pv-node2
+spec:
+ capacity:
+ storage: 1500Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: local-storage
+ local:
+ path: /data/doris-be/storage
+ nodeAffinity:
+ required:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values: ["192.168.10.18"]
+
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: doris-be-pv-node3
+spec:
+ capacity:
+ storage: 1500Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: local-storage
+ local:
+ path: /data/doris-be/storage
+ nodeAffinity:
+ required:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values: ["192.168.10.19"]
+
diff --git a/66-202505-浙江二级监管/install_docker_offline.sh b/66-202505-浙江二级监管/install_docker_offline.sh
new file mode 100644
index 0000000..e9e30ee
--- /dev/null
+++ b/66-202505-浙江二级监管/install_docker_offline.sh
@@ -0,0 +1,103 @@
+#!/bin/bash
+set -e
+
+# 定义变量
+DOCKER_TAR="docker-25.0.0.tgz"
+SYSTEMD_DIR="/lib/systemd/system"
+BIN_DIR="/usr/local/bin"
+
+# 0. 停止旧版本服务(如有)
+sudo systemctl stop docker containerd.socket containerd 2>/dev/null || true
+
+# 1. 解压Docker二进制包
+echo "解压Docker二进制包..."
+sudo tar -xzvf ${DOCKER_TAR} -C ${BIN_DIR} --strip-components=1
+
+# 2. 确保二进制文件可执行
+sudo chmod +x ${BIN_DIR}/{containerd,containerd-shim-runc-v2,ctr,dockerd,docker,docker-init,docker-proxy,runc}
+
+# 3. 配置containerd.service
+echo "配置containerd服务..."
+cat > ${SYSTEMD_DIR}/containerd.service <<EOF
+[Unit]
+Description=containerd container runtime
+Documentation=https://containerd.io
+After=network.target local-fs.target
+
+[Service]
+ExecStartPre=-/sbin/modprobe overlay
+ExecStart=${BIN_DIR}/containerd
+KillMode=process
+Delegate=yes
+LimitNOFILE=1048576
+LimitNPROC=infinity
+LimitCORE=infinity
+TasksMax=infinity
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+# 4. 配置docker.service
+echo "配置Docker服务..."
+cat > ${SYSTEMD_DIR}/docker.service <<EOF
+[Unit]
+Description=Docker Application Container Engine
+Documentation=https://docs.docker.com
+After=network-online.target docker.socket containerd.service
+Wants=network-online.target
+Requires=docker.socket containerd.service
+
+[Service]
+Type=notify
+ExecStart=${BIN_DIR}/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
+ExecReload=/bin/kill -s HUP \$MAINPID
+TimeoutSec=0
+RestartSec=2
+Restart=always
+StartLimitBurst=3
+StartLimitInterval=60s
+LimitNOFILE=infinity
+LimitNPROC=infinity
+LimitCORE=infinity
+TasksMax=infinity
+Delegate=yes
+KillMode=process
+OOMScoreAdjust=-500
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+# 5. 配置docker.socket
+echo "配置Docker Socket..."
+cat > ${SYSTEMD_DIR}/docker.socket <<'EOF'
+[Unit]
+Description=Docker Socket for the API
+PartOf=docker.service
+
+[Socket]
+ListenStream=/var/run/docker.sock
+SocketMode=0660
+SocketUser=root
+SocketGroup=docker
+
+[Install]
+WantedBy=sockets.target
+EOF
+
+# 6. 创建docker用户组
+echo "配置用户组..."
+sudo groupadd -f docker
+sudo usermod -aG docker "$USER" 2>/dev/null && echo "已添加用户 $USER 到docker组" || true
+
+# 7. 启用并启动服务
+echo "启动服务..."
+sudo systemctl daemon-reload
+sudo systemctl enable --now containerd docker
+
+# 8. 验证安装
+echo -e "\n验证状态:"
+sudo systemctl status containerd docker | grep "Active:"
+echo -e "\nDocker版本:"
+${BIN_DIR}/docker --version
diff --git a/66-202505-浙江二级监管/sshd_config b/66-202505-浙江二级监管/sshd_config
new file mode 100644
index 0000000..8393574
--- /dev/null
+++ b/66-202505-浙江二级监管/sshd_config
@@ -0,0 +1,143 @@
+# $OpenBSD: sshd_config,v 1.104 2021/07/02 05:11:21 dtucker Exp $
+
+# This is the sshd server system-wide configuration file. See
+# sshd_config(5) for more information.
+
+# This sshd was compiled with PATH=/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin
+
+# The strategy used for options in the default sshd_config shipped with
+# OpenSSH is to specify options with their default value where
+# possible, but leave them commented. Uncommented options override the
+# default value.
+
+# To modify the system-wide sshd configuration, create a *.conf file under
+# /etc/ssh/sshd_config.d/ which will be automatically included below
+#Include /etc/ssh/sshd_config.d/*.conf
+
+# If you want to change the port on a SELinux system, you have to tell
+# SELinux about this change.
+# semanage port -a -t ssh_port_t -p tcp #PORTNUMBER
+#
+Port 2202
+#AddressFamily any
+#ListenAddress 0.0.0.0
+#ListenAddress ::
+
+HostKey /etc/ssh/ssh_host_rsa_key
+HostKey /etc/ssh/ssh_host_ecdsa_key
+HostKey /etc/ssh/ssh_host_ed25519_key
+
+# Ciphers and keying
+#RekeyLimit default none
+
+# Logging
+#SyslogFacility AUTH
+SyslogFacility AUTH
+#LogLevel INFO
+
+# Authentication:
+
+#LoginGraceTime 2m
+PermitRootLogin yes
+#StrictModes yes
+#MaxAuthTries 6
+#MaxSessions 10
+
+#PubkeyAuthentication yes
+
+# The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2
+# but this is overridden so installations will only check .ssh/authorized_keys
+AuthorizedKeysFile .ssh/authorized_keys
+
+#AuthorizedPrincipalsFile none
+
+#AuthorizedKeysCommand none
+#AuthorizedKeysCommandUser nobody
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+#HostbasedAuthentication no
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# HostbasedAuthentication
+#IgnoreUserKnownHosts no
+# Don't read the user's ~/.rhosts and ~/.shosts files
+#IgnoreRhosts yes
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication yes
+#PermitEmptyPasswords no
+
+# Change to no to disable s/key passwords
+KbdInteractiveAuthentication no
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+#KerberosGetAFSToken no
+#KerberosUseKuserok yes
+
+# GSSAPI options
+GSSAPIAuthentication yes
+GSSAPICleanupCredentials no
+#GSSAPIStrictAcceptorCheck yes
+#GSSAPIKeyExchange no
+#GSSAPIEnablek5users no
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the KbdInteractiveAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via KbdInteractiveAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and KbdInteractiveAuthentication to 'no'.
+# WARNING: 'UsePAM no' is not supported in openEuler and may cause several
+# problems.
+UsePAM yes
+
+#AllowAgentForwarding yes
+#AllowTcpForwarding yes
+#GatewayPorts no
+X11Forwarding yes
+#X11DisplayOffset 10
+#X11UseLocalhost yes
+#PermitTTY yes
+PrintMotd no
+#PrintLastLog yes
+#TCPKeepAlive yes
+#PermitUserEnvironment no
+#Compression delayed
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+UseDNS no
+#PidFile /var/run/sshd.pid
+#MaxStartups 10:30:100
+#PermitTunnel no
+#ChrootDirectory none
+#VersionAddendum none
+
+# no default banner path
+#Banner none
+
+AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
+AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
+AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
+AcceptEnv XMODIFIERS
+
+# override default of no subsystems
+Subsystem sftp /usr/libexec/openssh/sftp-server -l INFO -f AUTH
+
+# Example of overriding settings on a per-user basis
+#Match User anoncvs
+# X11Forwarding no
+# PermitTTY no
+# ForceCommand cvs server
+#CheckUserSplash yes
+
+PubkeyAuthentication yes
+#RSAAuthentication yes (deprecated: option removed in OpenSSH 7.4+, modern sshd only ignores it with a warning)
+IgnoreRhosts yes
+PermitEmptyPasswords no
+Banner /etc/issue.net
+AllowTcpForwarding yes
\ No newline at end of file
diff --git a/66-202505-浙江二级监管/分块压缩合.md b/66-202505-浙江二级监管/分块压缩合.md
new file mode 100644
index 0000000..da89fbc
--- /dev/null
+++ b/66-202505-浙江二级监管/分块压缩合.md
@@ -0,0 +1,55 @@
+
+
+
+下面是压缩Docker镜像为分块压缩包及合并的命令:
+
+---
+
+### **1. 压缩并分割镜像**
+#### **推荐方式:直接通过管道压缩并分块**
+使用 `gzip` 压缩(速度快,中等压缩率):
+```bash
+docker save <镜像名:标签> | gzip | split -b 5G - image_part_.gz
+```
+或使用 `xz` 压缩(高压缩率,速度慢):
+```bash
+docker save <镜像名:标签> | xz -T0 | split -b 5G - image_part_.xz
+```
+**参数说明**:
+- `<镜像名:标签>`:替换为实际的镜像名称和标签。
+- `split -b 5G`:将输入流分割为每块最大5GB。
+- `image_part_.gz` 或 `image_part_.xz`:分块文件前缀,生成的文件名类似 `image_part_.gzaa`、`image_part_.gzab` 等(`split` 直接在前缀后追加后缀,没有额外的点)。
+
+---
+
+### **2. 合并分块并还原镜像**
+#### **gzip 压缩分块的合并与加载**
+```bash
+cat image_part_.gz* | gunzip | docker load
+```
+#### **xz 压缩分块的合并与加载**
+```bash
+cat image_part_.xz* | xz -d | docker load
+```
+
+---
+
+### **工作原理**
+1. **压缩分块**:
+ - `docker save` 输出镜像的 TAR 存档到标准输出。
+ - 通过管道将 TAR 数据实时压缩(`gzip` 或 `xz`)。
+ - `split` 将压缩后的流按 `5G` 大小分割为多个文件。
+
+2. **合并还原**:
+ - `cat` 按顺序合并所有分块文件。
+ - `gunzip` 或 `xz -d` 解压合并后的流。
+ - `docker load` 从解压后的 TAR 流中加载镜像。
+
+---
+
+### **注意事项**
+- **分块命名**:`split` 默认生成 `aa`, `ab` 等后缀。若分块超过几百个,需用 `-a <长度>` 指定后缀长度(如 `-a 3` 生成 `001`)。
+- **磁盘空间**:合并时需要足够的临时空间存储解压后的完整 TAR 数据(如原镜像为24GB,需至少24GB空间)。
+- **压缩选择**:
+ - `gzip`:速度较快,适合快速处理。
+ - `xz`:压缩率更高(尤其适合二进制数据),但需要更多时间和CPU资源。
\ No newline at end of file
diff --git a/66-202505-浙江二级监管/压缩文件包.txt b/66-202505-浙江二级监管/压缩文件包.txt
new file mode 100644
index 0000000..9fd6e0e
--- /dev/null
+++ b/66-202505-浙江二级监管/压缩文件包.txt
@@ -0,0 +1,20 @@
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uas-gateway=2.1-demo-20250527-licence.tar.gz
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uas-lifecycle=2.1-demo-20250527-licence.tar.gz
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-notice=pro-6.0.8.tar.gz
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-material-warehouse=6.2.0-050701.tar.gz
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-platform-uasms=2.1-demo-20250527.tar.gz
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-platform-uas=2.1-demo-20250527.tar.gz
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uavms-pyfusion=6.3.6.tar.gz
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-iot-dispatcher=6.2.0-focus.tar.gz
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-sense-adapter=6.2.0-250415.tar.gz
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-watchdog=1.0.tar.gz
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-live-operator=5.2.0.tar.gz
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=srs=v5.0.195-arm.tar.gz
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-srs-oss-adaptor=2023-SA-skip-CHL.tar.gz
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/docker=cmii=doris.fe-ubuntu=2.1.6.tar.gz
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=doris.be-amd64=2.1.6.tar.gz
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/image_part_.gzaa
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/image_part_.gzab
+https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/image_part_.gzac
+https://oss.demo.uavcmlc.com/cmlc-installation/gb28181/v5.7.0-x86/gb28181_x86_2.7.3_20250414.img.tar
+https://oss.demo.uavcmlc.com/cmlc-installation/gb28181/docker-gb28181.tar
\ No newline at end of file
diff --git a/66-202505-浙江二级监管/已有部署备份/all-statefull_sets-zjyd.yaml b/66-202505-浙江二级监管/已有部署备份/all-statefull_sets-zjyd.yaml
new file mode 100644
index 0000000..db86412
--- /dev/null
+++ b/66-202505-浙江二级监管/已有部署备份/all-statefull_sets-zjyd.yaml
@@ -0,0 +1,1115 @@
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 5.7.0
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ cmii.type: middleware
+ helm.sh/chart: emqx-1.1.0
+ name: helm-emqxs
+ namespace: zjyd
+spec:
+ podManagementPolicy: OrderedReady
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ cmii.type: middleware
+ serviceName: helm-emqxs-headless
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 5.7.0
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ cmii.type: middleware
+ helm.sh/chart: emqx-1.1.0
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: emqx.node
+ operator: In
+ values:
+ - master
+ containers:
+ - envFrom:
+ - configMapRef:
+ name: helm-emqxs-env
+ image: 192.168.10.3:8033/cmii/emqx:5.5.1
+ imagePullPolicy: Always
+ name: helm-emqxs
+ ports:
+ - containerPort: 1883
+ name: mqtt
+ protocol: TCP
+ - containerPort: 8883
+ name: mqttssl
+ protocol: TCP
+ - containerPort: 8081
+ name: mgmt
+ protocol: TCP
+ - containerPort: 8083
+ name: ws
+ protocol: TCP
+ - containerPort: 8084
+ name: wss
+ protocol: TCP
+ - containerPort: 18083
+ name: dashboard
+ protocol: TCP
+ - containerPort: 4370
+ name: ekka
+ protocol: TCP
+ resources:
+ limits:
+ memory: 8Gi
+ cpu: "8"
+ requests:
+ memory: "8Gi"
+ cpu: "4"
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /opt/emqx/data/mnesia
+ name: emqx-data
+ - mountPath: /opt/emqx/etc/plugins/emqx_auth_mnesia.conf
+ name: helm-emqxs-cm
+ subPath: emqx_auth_mnesia.conf
+ - mountPath: /opt/emqx/data/loaded_plugins
+ name: helm-emqxs-cm
+ subPath: loaded_plugins
+ dnsPolicy: ClusterFirst
+ imagePullSecrets:
+ - name: harborsecret
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ securityContext: {}
+ serviceAccount: helm-emqxs
+ serviceAccountName: helm-emqxs
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: emqx-data
+ persistentVolumeClaim:
+ claimName: helm-emqxs
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: emqx_auth_mnesia.conf
+ path: emqx_auth_mnesia.conf
+ - key: acl.conf
+ path: acl.conf
+ - key: loaded_plugins
+ path: loaded_plugins
+ name: helm-emqxs-cm
+ name: helm-emqxs-cm
+ updateStrategy:
+ type: RollingUpdate
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: Helm
+ cmii.app: live-srs
+ cmii.type: midware
+ helm.sh/chart: cmlc-live-srs-rtc-2.0.0
+ srs-role: rtc
+ name: helm-live-srs-rtc
+ namespace: zjyd
+spec:
+ podManagementPolicy: OrderedReady
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ srs-role: rtc
+ serviceName: helm-live-srsrtc-svc
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ srs-role: rtc
+ spec:
+ containers:
+ - env:
+ - name: CANDIDATE
+ value: lingyun.zyjctech.com
+ image: 192.168.10.3:8033/cmii/srs:5.0-r3
+ imagePullPolicy: IfNotPresent
+ name: srs-rtc
+ ports:
+ - containerPort: 32006
+ name: srs-rtmp
+ protocol: TCP
+ - containerPort: 1985
+ name: srs-api
+ protocol: TCP
+ - containerPort: 8080
+ name: srs-flv
+ protocol: TCP
+ - containerPort: 20090
+ name: srs-webrtc
+ protocol: UDP
+ - containerPort: 30556
+ name: srs-srt
+ protocol: UDP
+ resources:
+ limits:
+ cpu: "4"
+ memory: 4Gi
+ requests:
+ cpu: 1200m
+ memory: 256Mi
+ securityContext:
+ privileged: true
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /usr/local/srs/conf/docker.conf
+ name: srs-conf-file
+ subPath: docker.conf
+ - mountPath: /home/dvr
+ name: srs-vol
+ subPath: default/helm-live/dvr
+ - mountPath: /home/hls
+ name: srs-vol
+ subPath: default/helm-live/hls
+ - env:
+ - name: OSS_ENDPOINT
+ value: http://192.168.10.2:9000
+ - name: OSS_AK
+ value: cmii
+ - name: OSS_SK
+ value: B#923fC7mk
+ - name: OSS_BUCKET
+ value: live-srs-hls
+ - name: SRS_OP
+ value: http://helm-live-op-svc-v2:8080
+ - name: MYSQL_ENDPOINT
+ value: 192.168.10.10:33061
+ - name: MYSQL_USERNAME
+ value: k8s_admin
+ - name: MYSQL_PASSWORD
+ value: fP#UaH6qQ3)8
+ - name: MYSQL_DATABASE
+ value: cmii_live_srs_op
+ - name: MYSQL_TABLE
+ value: live_segment
+ - name: LOG_LEVEL
+ value: info
+ - name: OSS_META
+ value: "yes"
+ image: 192.168.10.3:8033/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
+ imagePullPolicy: Always
+ name: oss-adaptor
+ resources:
+ limits:
+ cpu: "4"
+ memory: 4Gi
+ requests:
+ cpu: 1200m
+ memory: 256Mi
+ securityContext:
+ privileged: true
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /cmii/share/hls
+ name: srs-vol
+ subPath: default/helm-live/hls
+ dnsPolicy: ClusterFirst
+ imagePullSecrets:
+ - name: harborsecret
+ nodeSelector:
+ kubernetes.io/hostname: 192.168.10.2
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: srs.rtc.conf
+ path: docker.conf
+ name: helm-live-srs-cm
+ name: srs-conf-file
+ - emptyDir:
+ sizeLimit: 10Gi
+ name: srs-vol
+ updateStrategy:
+ rollingUpdate:
+ partition: 0
+ type: RollingUpdate
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 5.7.0
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ name: helm-mongo
+ namespace: zjyd
+spec:
+ podManagementPolicy: OrderedReady
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ serviceName: helm-mongo
+ template:
+ metadata:
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ creationTimestamp: null
+ labels:
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 5.7.0
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: mongo.node
+ operator: In
+ values:
+ - master
+ containers:
+ - env:
+ - name: MONGO_INITDB_ROOT_USERNAME
+ value: cmlc
+ - name: MONGO_INITDB_ROOT_PASSWORD
+ value: REdPza8#oVlt
+ image: 192.168.10.3:8033/cmii/mongo:5.0
+ imagePullPolicy: IfNotPresent
+ name: helm-mongo
+ ports:
+ - containerPort: 27017
+ name: mongo27017
+ protocol: TCP
+ resources:
+ limits:
+ memory: 30Gi
+ cpu: "12"
+ requests:
+ memory: "12Gi"
+ cpu: "12"
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /data/db
+ name: mongo-data
+ subPath: default/helm-mongo/data/db
+ dnsPolicy: ClusterFirst
+ imagePullSecrets:
+ - name: harborsecret
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: mongo-data
+ persistentVolumeClaim:
+ claimName: helm-mongo
+ updateStrategy:
+ rollingUpdate:
+ partition: 0
+ type: RollingUpdate
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ labels:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: zjyd
+ cmii.app: mysql
+ cmii.type: middleware
+ octopus.control: mysql-db-wdd
+ name: helm-mysql
+ namespace: zjyd
+spec:
+ podManagementPolicy: OrderedReady
+ replicas: 0
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: zjyd
+ cmii.app: mysql
+ cmii.type: middleware
+ serviceName: helm-mysql
+ template:
+ metadata:
+ annotations:
+ checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
+ creationTimestamp: null
+ labels:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: zjyd
+ cmii.app: mysql
+ cmii.type: middleware
+ octopus.control: mysql-db-wdd
+ spec:
+ affinity: {}
+ containers:
+ - env:
+ - name: BITNAMI_DEBUG
+ value: "true"
+ - name: MYSQL_ROOT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ key: mysql-root-password
+ name: helm-mysql
+ - name: MYSQL_DATABASE
+ value: cmii
+ image: 192.168.10.3:8033/cmii/mysql:8.1.0-debian-11-r42
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ failureThreshold: 5
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ name: mysql
+ ports:
+ - containerPort: 3306
+ name: mysql
+ protocol: TCP
+ readinessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ failureThreshold: 5
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ resources: {}
+ securityContext:
+ runAsUser: 1001
+ startupProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ failureThreshold: 60
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /bitnami/mysql
+ name: mysql-data
+ - mountPath: /docker-entrypoint-initdb.d
+ name: custom-init-scripts
+ - mountPath: /opt/bitnami/mysql/conf/my.cnf
+ name: config
+ subPath: my.cnf
+ dnsPolicy: ClusterFirst
+ imagePullSecrets:
+ - name: harborsecret
+ initContainers:
+ - command:
+ - /bin/bash
+ - -ec
+ - |
+ chown -R 1001:1001 /bitnami/mysql
+ image: 192.168.10.3:8033/cmii/bitnami-shell:11-debian-11-r136
+ imagePullPolicy: Always
+ name: change-volume-permissions
+ resources: {}
+ securityContext:
+ runAsUser: 0
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /bitnami/mysql
+ name: mysql-data
+ nodeSelector:
+ mysql-deploy: "true"
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ securityContext:
+ fsGroup: 1001
+ serviceAccount: helm-mysql
+ serviceAccountName: helm-mysql
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - configMap:
+ defaultMode: 420
+ name: helm-mysql
+ name: config
+ - configMap:
+ defaultMode: 420
+ name: helm-mysql-init-scripts
+ name: custom-init-scripts
+ - hostPath:
+ path: /var/lib/docker/mysql-pv/zjyd/
+ type: ""
+ name: mysql-data
+ updateStrategy:
+ type: RollingUpdate
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: 5.7.0
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ name: helm-nacos
+ namespace: zjyd
+spec:
+ podManagementPolicy: OrderedReady
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ serviceName: helm-nacos
+ template:
+ metadata:
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ creationTimestamp: null
+ labels:
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/version: 5.7.0
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ spec:
+ affinity: {}
+ containers:
+ - env:
+ - name: NACOS_AUTH_ENABLE
+ value: "false"
+ - name: NACOS_REPLICAS
+ value: "1"
+ - name: MYSQL_SERVICE_DB_NAME
+ valueFrom:
+ configMapKeyRef:
+ key: mysql.db.name
+ name: helm-nacos-cm
+ - name: MYSQL_SERVICE_PORT
+ valueFrom:
+ configMapKeyRef:
+ key: mysql.port
+ name: helm-nacos-cm
+ - name: MYSQL_SERVICE_USER
+ valueFrom:
+ configMapKeyRef:
+ key: mysql.user
+ name: helm-nacos-cm
+ - name: MYSQL_SERVICE_PASSWORD
+ valueFrom:
+ configMapKeyRef:
+ key: mysql.password
+ name: helm-nacos-cm
+ - name: MYSQL_SERVICE_HOST
+ valueFrom:
+ configMapKeyRef:
+ key: mysql.db.host
+ name: helm-nacos-cm
+ - name: NACOS_SERVER_PORT
+ value: "8848"
+ - name: NACOS_APPLICATION_PORT
+ value: "8848"
+ - name: PREFER_HOST_MODE
+ value: hostname
+ - name: MODE
+ value: standalone
+ - name: SPRING_DATASOURCE_PLATFORM
+ value: mysql
+ image: 192.168.10.3:8033/cmii/nacos-server:v2.1.2
+ imagePullPolicy: IfNotPresent
+ name: nacos-server
+ ports:
+ - containerPort: 8848
+ name: dashboard
+ protocol: TCP
+ - containerPort: 9848
+ name: tcp-9848
+ protocol: TCP
+ - containerPort: 9849
+ name: tcp-9849
+ protocol: TCP
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ dnsPolicy: ClusterFirst
+ imagePullSecrets:
+ - name: harborsecret
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ updateStrategy:
+ rollingUpdate:
+ partition: 0
+ type: RollingUpdate
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: rabbitmq
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: wxyd1m
+ helm.sh/chart: rabbitmq-8.26.1
+ name: helm-rabbitmq
+ namespace: zjyd
+spec:
+ podManagementPolicy: OrderedReady
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: wxyd1m
+ serviceName: helm-rabbitmq-headless
+ template:
+ metadata:
+ annotations:
+ checksum/config: ee419cab020927201d3322a38802f988e787d179b3dfcb4e2bef40d5d524d363
+ checksum/secret: 0c350f0d069c68734be7c5b7b05fae72e096020ca56e33656de9ed2bde1a7320
+ creationTimestamp: null
+ labels:
+ app.kubernetes.io/managed-by: rabbitmq
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: wxyd1m
+ helm.sh/chart: rabbitmq-8.26.1
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: rabbitmq.node
+ operator: In
+ values:
+ - master
+ containers:
+ - env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: K8S_SERVICE_NAME
+ value: helm-rabbitmq-headless
+ - name: K8S_ADDRESS_TYPE
+ value: hostname
+ - name: RABBITMQ_FORCE_BOOT
+ value: "no"
+ - name: RABBITMQ_NODE_NAME
+ value: rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local
+ - name: K8S_HOSTNAME_SUFFIX
+ value: .$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local
+ - name: RABBITMQ_MNESIA_DIR
+ value: /bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)
+ - name: RABBITMQ_LDAP_ENABLE
+ value: "no"
+ - name: RABBITMQ_LOGS
+ value: '-'
+ - name: RABBITMQ_ULIMIT_NOFILES
+ value: "65536"
+ - name: RABBITMQ_USE_LONGNAME
+ value: "true"
+ - name: RABBITMQ_ERL_COOKIE
+ valueFrom:
+ secretKeyRef:
+ key: rabbitmq-erlang-cookie
+ name: helm-rabbitmq
+ - name: RABBITMQ_LOAD_DEFINITIONS
+ value: "no"
+ - name: RABBITMQ_SECURE_PASSWORD
+ value: "yes"
+ - name: RABBITMQ_USERNAME
+ value: admin
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ key: rabbitmq-password
+ name: helm-rabbitmq
+ - name: RABBITMQ_PLUGINS
+ value: rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap
+ image: 192.168.10.3:8033/cmii/rabbitmq:3.11.26-debian-11-r2
+ imagePullPolicy: Always
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
+ /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
+ else
+ rabbitmqctl stop_app
+ fi
+ livenessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q ping
+ failureThreshold: 6
+ initialDelaySeconds: 120
+ periodSeconds: 30
+ successThreshold: 1
+ timeoutSeconds: 20
+ name: rabbitmq
+ ports:
+ - containerPort: 5672
+ name: amqp
+ protocol: TCP
+ - containerPort: 25672
+ name: dist
+ protocol: TCP
+ - containerPort: 15672
+ name: dashboard
+ protocol: TCP
+ - containerPort: 4369
+ name: epmd
+ protocol: TCP
+ readinessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ successThreshold: 1
+ timeoutSeconds: 20
+ resources:
+ limits:
+ memory: 6Gi
+ cpu: "4"
+ requests:
+ memory: "4Gi"
+ cpu: "2"
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /bitnami/rabbitmq/conf
+ name: configuration
+ - mountPath: /bitnami/rabbitmq/mnesia
+ name: data
+ dnsPolicy: ClusterFirst
+ initContainers:
+ - args:
+ - -ec
+ - |
+ mkdir -p "/bitnami/rabbitmq/mnesia"
+ chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
+ command:
+ - /bin/bash
+ image: 192.168.10.3:8033/cmii/bitnami-shell:11-debian-11-r136
+ imagePullPolicy: Always
+ name: volume-permissions
+ resources: {}
+ securityContext:
+ runAsUser: 0
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /bitnami/rabbitmq/mnesia
+ name: data
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ securityContext:
+ fsGroup: 5001
+ runAsUser: 5001
+ serviceAccount: helm-rabbitmq
+ serviceAccountName: helm-rabbitmq
+ terminationGracePeriodSeconds: 120
+ volumes:
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: rabbitmq.conf
+ path: rabbitmq.conf
+ name: helm-rabbitmq-config
+ name: configuration
+ - name: data
+ persistentVolumeClaim:
+ claimName: helm-rabbitmq
+ updateStrategy:
+ type: RollingUpdate
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ labels:
+ app.kubernetes.io/component: master
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: zjyd
+ cmii.app: redis
+ cmii.type: middleware
+ octopus.control: redis-db-wdd
+ name: helm-redis-master
+ namespace: zjyd
+spec:
+ podManagementPolicy: OrderedReady
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: master
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: zjyd
+ cmii.app: redis
+ cmii.type: middleware
+ serviceName: helm-redis-headless
+ template:
+ metadata:
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ creationTimestamp: null
+ labels:
+ app.kubernetes.io/component: master
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: zjyd
+ cmii.app: redis
+ cmii.type: middleware
+ octopus.control: redis-db-wdd
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: redis.node
+ operator: In
+ values:
+ - master
+ containers:
+ - args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-master.sh
+ command:
+ - /bin/bash
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: master
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ key: redis-password
+ name: helm-redis
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ image: 192.168.10.3:8033/cmii/redis:6.2.6-debian-10-r0
+ imagePullPolicy: Always
+ livenessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local.sh 5
+ failureThreshold: 5
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ successThreshold: 1
+ timeoutSeconds: 6
+ name: redis
+ ports:
+ - containerPort: 6379
+ name: redis
+ protocol: TCP
+ readinessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local.sh 1
+ failureThreshold: 5
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ successThreshold: 1
+ timeoutSeconds: 2
+ resources:
+ limits:
+ cpu: "4"
+ memory: 12Gi
+ requests:
+ cpu: "4"
+ memory: 12Gi
+ securityContext:
+ runAsUser: 1001
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /opt/bitnami/scripts/start-scripts
+ name: start-scripts
+ - mountPath: /health
+ name: health
+ - mountPath: /data
+ name: redis-data
+ - mountPath: /opt/bitnami/redis/mounted-etc
+ name: config
+ - mountPath: /opt/bitnami/redis/etc/
+ name: redis-tmp-conf
+ - mountPath: /tmp
+ name: tmp
+ dnsPolicy: ClusterFirst
+ imagePullSecrets:
+ - name: harborsecret
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ securityContext:
+ fsGroup: 1001
+ serviceAccount: helm-redis
+ serviceAccountName: helm-redis
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - configMap:
+ defaultMode: 493
+ name: helm-redis-scripts
+ name: start-scripts
+ - configMap:
+ defaultMode: 493
+ name: helm-redis-health
+ name: health
+ - configMap:
+ defaultMode: 420
+ name: helm-redis-configuration
+ name: config
+ - emptyDir: {}
+ name: redis-tmp-conf
+ - emptyDir: {}
+ name: tmp
+ - emptyDir: {}
+ name: redis-data
+ updateStrategy:
+ rollingUpdate:
+ partition: 0
+ type: RollingUpdate
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ labels:
+ app.kubernetes.io/component: replica
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: zjyd
+ octopus.control: redis-db-wdd
+ name: helm-redis-replicas
+ namespace: zjyd
+spec:
+ podManagementPolicy: OrderedReady
+ replicas: 0
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: replica
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: zjyd
+ serviceName: helm-redis-headless
+ template:
+ metadata:
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ creationTimestamp: null
+ labels:
+ app.kubernetes.io/component: replica
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: zjyd
+ octopus.control: redis-db-wdd
+ spec:
+ containers:
+ - args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-replica.sh
+ command:
+ - /bin/bash
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: slave
+ - name: REDIS_MASTER_HOST
+ value: helm-redis-master-0.helm-redis-headless.zjyd.svc.cluster.local
+ - name: REDIS_MASTER_PORT_NUMBER
+ value: "6379"
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ key: redis-password
+ name: helm-redis
+ - name: REDIS_MASTER_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ key: redis-password
+ name: helm-redis
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ image: 192.168.10.3:8033/cmii/redis:6.2.6-debian-10-r0
+ imagePullPolicy: Always
+ livenessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local_and_master.sh 5
+ failureThreshold: 5
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ successThreshold: 1
+ timeoutSeconds: 6
+ name: redis
+ ports:
+ - containerPort: 6379
+ name: redis
+ protocol: TCP
+ readinessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local_and_master.sh 1
+ failureThreshold: 5
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ successThreshold: 1
+ timeoutSeconds: 2
+ resources:
+ limits:
+ cpu: "4"
+ memory: 8Gi
+ requests:
+ cpu: "4"
+ memory: 8Gi
+ securityContext:
+ runAsUser: 1001
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /opt/bitnami/scripts/start-scripts
+ name: start-scripts
+ - mountPath: /health
+ name: health
+ - mountPath: /data
+ name: redis-data
+ - mountPath: /opt/bitnami/redis/mounted-etc
+ name: config
+ - mountPath: /opt/bitnami/redis/etc
+ name: redis-tmp-conf
+ dnsPolicy: ClusterFirst
+ imagePullSecrets:
+ - name: harborsecret
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ securityContext:
+ fsGroup: 1001
+ serviceAccount: helm-redis
+ serviceAccountName: helm-redis
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - configMap:
+ defaultMode: 493
+ name: helm-redis-scripts
+ name: start-scripts
+ - configMap:
+ defaultMode: 493
+ name: helm-redis-health
+ name: health
+ - configMap:
+ defaultMode: 420
+ name: helm-redis-configuration
+ name: config
+ - emptyDir: {}
+ name: redis-tmp-conf
+ - emptyDir: {}
+ name: redis-data
+ updateStrategy:
+ rollingUpdate:
+ partition: 0
+ type: RollingUpdate
diff --git a/66-202505-浙江二级监管/已有部署备份/install_auth.sh b/66-202505-浙江二级监管/已有部署备份/install_auth.sh
new file mode 100644
index 0000000..6415e69
--- /dev/null
+++ b/66-202505-浙江二级监管/已有部署备份/install_auth.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Push the licence file onto the watchdog PVC and drive the authorization flow.
+set -euo pipefail
+
+readonly PVC_DIR=/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uav-watchdog
+
+# Copy the auth file onto the NFS-backed PVC on the storage host.
+scp -P 2202 /root/wdd/install/auth_file.json "root@192.168.10.2:${PVC_DIR}/"
+
+# Confirm the upload is visible on the remote host.
+ssh -p 2202 root@192.168.10.2 "ls ${PVC_DIR}"
+
+# Generate the authorization file.
+curl http://localhost:8080/api/authorization/generate
+
+# Submit the authorization code.
+curl -X POST \
+  http://localhost:8080/api/authorization/auth \
+  -H 'Content-Type: application/json' \
+  --data-binary @auth_file.json
+
+# List the currently authorized hosts.
+curl http://localhost:8080/api/authorization/hosts
diff --git a/66-202505-浙江二级监管/已有部署备份/nginx-web.conf b/66-202505-浙江二级监管/已有部署备份/nginx-web.conf
new file mode 100644
index 0000000..0d2fee4
--- /dev/null
+++ b/66-202505-浙江二级监管/已有部署备份/nginx-web.conf
@@ -0,0 +1,144 @@
+
+ ###### 监管平台转发
+ location ^~ /uas {
+ #######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
+ add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
+ #开启HTTP严格传输安全HSTS
+ add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
+ proxy_pass http://localhost:30500;
+ client_max_body_size 5120m;
+ client_body_buffer_size 5120m;
+ client_body_timeout 6000s;
+ proxy_send_timeout 10000s;
+ proxy_read_timeout 10000s;
+ proxy_connect_timeout 600s;
+ proxy_max_temp_file_size 5120m;
+ proxy_request_buffering on;
+ proxy_buffering off;
+ proxy_buffer_size 4k;
+ proxy_buffers 4 12k;
+ proxy_set_header Host fake-domain.zjejpt-uas.io;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ }
+
+ location / {
+ #######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
+ add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
+ #开启HTTP严格传输安全HSTS
+ add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
+ proxy_pass http://localhost:30500;
+ client_max_body_size 5120m;
+ client_body_buffer_size 5120m;
+ client_body_timeout 6000s;
+ proxy_send_timeout 10000s;
+ proxy_read_timeout 10000s;
+ proxy_connect_timeout 600s;
+ proxy_max_temp_file_size 5120m;
+ proxy_request_buffering on;
+ proxy_buffering off;
+ proxy_buffer_size 4k;
+ proxy_buffers 4 12k;
+ proxy_set_header Host fake-domain.zjyd.io;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ }
+
+    location /_AMapService/v4/map/styles {
+        # Mark cookie HttpOnly/Secure (fix for inconsistent-cookie-attribute findings)
+        add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
+        # Enable HTTP Strict Transport Security (HSTS)
+        add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
+        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
+        proxy_pass https://webapi.amap.com/v4/map/styles;
+    }
+
+ location /_AMapService/ {
+ #######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
+ add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
+ #开启HTTP严格传输安全HSTS
+ add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
+ set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
+ proxy_pass https://restapi.amap.com/;
+ }
+
+ location /rtc/v1/ {
+ #######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
+ add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
+ #开启HTTP严格传输安全HSTS
+ add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
+ add_header Access-Control-Allow-Headers X-Requested-With;
+ add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
+ proxy_pass http://192.168.10.3:30985/rtc/v1/;
+ }
+
+
+
+### 视频国标GB28181 ###
+
+# location /zlm/flv/ {
+# #######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
+# add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
+# #开启HTTP严格传输安全HSTS
+# add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
+# add_header Access-Control-Allow-Headers X-Requested-With;
+# add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
+# proxy_pass http://192.168.10.25:7088/;
+# }
+# location /zlm/hls/ {
+# #######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
+# add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
+# #开启HTTP严格传输安全HSTS
+# add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
+# add_header Access-Control-Allow-Headers X-Requested-With;
+# add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
+# proxy_pass http://192.168.10.25:7088/zlm/hls/;
+# }
+# location /index/api/ {
+# #######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
+# add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
+# #开启HTTP严格传输安全HSTS
+# add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
+# add_header Access-Control-Allow-Headers X-Requested-With;
+# add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
+# proxy_pass http://192.168.10.25:7088/index/api/;
+# }
+
+
+ #location /video_feed {
+ # proxy_pass http://192.168.10.12:5000;
+ # proxy_http_version 1.1;
+ # proxy_set_header Upgrade $http_upgrade;
+ # proxy_set_header Connection "upgrade";
+ # proxy_set_header Host $host;
+ # proxy_cache_bypass $http_upgrade;
+ #}
+ #location /video_person {
+ # proxy_pass http://192.168.10.12:5001;
+ # proxy_http_version 1.1;
+ # proxy_set_header Upgrade $http_upgrade;
+ # proxy_set_header Connection "upgrade";
+ # proxy_set_header Host $host;
+ # proxy_cache_bypass $http_upgrade;
+ #}
+ #location /video {
+ # #######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
+ # add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
+ # #开启HTTP严格传输安全HSTS
+ # add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
+ # alias /data/test/;
+ # index 10.mp4;
+ #}
+
+ #location ~ ^/\w*/actuator/ {
+ # return 403;
+ #}
+
+ location ~ ^/.*/(actuator|swagger-resources|api-docs|health).* {
+ add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
+ return 404;
+ }
diff --git a/66-202505-浙江二级监管/已有部署备份/nginx-端口转发.conf b/66-202505-浙江二级监管/已有部署备份/nginx-端口转发.conf
new file mode 100644
index 0000000..25c5dcd
--- /dev/null
+++ b/66-202505-浙江二级监管/已有部署备份/nginx-端口转发.conf
@@ -0,0 +1,274 @@
+user www www;
+worker_processes auto;
+
+error_log logs/error.log warn;
+pid /var/run/nginx/nginx.pid;
+
+events {
+ worker_connections 65535;
+}
+
+stream{
+ include /data/nginx/conf/blacklist.conf;
+ include /data/nginx/conf/blacklist_zhejiang.conf;
+ deny all;
+ #飞行数据-mqtt
+ upstream tcp31883{
+ server 127.0.0.1:32883; #中移凌云飞行数据
+ }
+ server{
+ listen 31883;
+ proxy_pass tcp31883;
+ }
+
+ #飞行数据-mqtt-websocket
+ upstream tcp38083{
+ server 127.0.0.1:39083;
+ }
+ server{
+ listen 38083;
+ proxy_pass tcp38083;
+ }
+
+ #视频流媒体-RTMP
+ upstream tcp31935{
+ server 127.0.0.1:32935;
+ }
+ server{
+ listen 31935;
+ proxy_pass tcp31935;
+ }
+
+ #视频流媒体-WEBRTC
+ upstream udp30090{
+ server 127.0.0.1:31090;
+ }
+ server{
+ listen 30090 udp;
+ proxy_pass udp30090;
+ }
+
+ #视频流播放TCP端口
+ #upstream tcp30080{
+ # server 127.0.0.1:31080;
+ #}
+ #server{
+ # listen 30080;
+ # proxy_pass tcp30080;
+ #}
+
+ #rtsp-控制TCP端口
+ #upstream tcp30554{
+ # server 127.0.0.1:32554;
+ #}
+ #server{
+ # listen 30554;
+ # proxy_pass tcp30554;
+ #}
+
+ #rtsp-数据TCP端口
+ #upstream tcp30556{
+ # server 127.0.0.1:32556;
+ #}
+ #server{
+ # listen 30556;
+ # proxy_pass tcp30556;
+ #}
+
+ #rtsp-数据UDP端口
+ #upstream udp30556{
+ # server 127.0.0.1:32556;
+ #}
+ #server{
+ # listen 30556 udp;
+ # proxy_pass udp30556;
+ #}
+
+ #模拟数据测试UDP端口
+ #upstream udp30556{
+ # server 127.0.0.1:31556;
+ #}
+ #server{
+ # listen 30556 udp;
+ # proxy_pass udp30556;
+ #}
+
+ #RabbitMQ控制台端口
+# server{
+# listen 32002;
+# proxy_pass 192.168.10.11:15672;
+# }
+
+}
+
+
+http {
+ include /data/nginx/conf/blacklist.conf;
+ include /data/nginx/conf/blacklist_zhejiang.conf;
+ deny all;
+ include mime.types;
+ default_type application/octet-stream;
+ ## 去除版本信息 ##
+ server_tokens off;
+ #error日志更换
+ #fastcgi_intercept_errors on;
+ error_log logs/error.log warn;
+ #####
+
+ sendfile on;
+ keepalive_timeout 60;
+ client_body_timeout 30s;
+ client_header_timeout 30s;
+ send_timeout 30s;
+ gzip on;
+ #more_clear_headers 'Server';
+ add_header X-Frame-Options SAMEORIGIN always;
+ add_header X-Content-Type-Options nosniff;
+ #开启HTTP严格传输安全HSTS
+ add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
+
+
+
+ underscores_in_headers on;
+ log_format main '$remote_addr - $remote_user [$time_local]'
+ '#"$request_method $scheme://$host$request_uri $server_protocol" '
+ '$status $body_bytes_sent "$http_referer" '
+ '"$http_user_agent" "$http_x_forwarded_for" - "$request_time"';
+ access_log /data/nginx/logs/access.log main;
+
+    server {
+        listen 8088 ssl;
+        server_name lingyun.zyjctech.com;
+        index index.jsp index.htm index.html;
+
+        ### CORS settings (temporary) ###
+        add_header 'Access-Control-Allow-Origin' '*';
+        add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
+        add_header 'Access-Control-Allow-Headers' 'Origin, Content-Type, Accept, Authorization';
+        if ($request_method = 'OPTIONS') {
+            return 204;
+        }
+        # Disallow iframe embedding
+        add_header X-Frame-Options SAMEORIGIN always;
+        add_header X-Content-Type-Options nosniff;
+        add_header X-XSS-Protection "1; mode=block";
+        # Mark cookie HttpOnly/Secure (fix for inconsistent-cookie-attribute findings)
+        add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
+        # Enable HTTP Strict Transport Security (HSTS)
+        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
+
+
+        ### SSL configuration ###
+
+        ssl_certificate /data/nginx/conf/zyjctech.com_cert_chain.pem;
+        ssl_certificate_key /data/nginx/conf/zyjctech.com_key.key;
+        ssl_session_timeout 10m;
+        ## Added ##
+        #ssl_stapling_verify on;
+        #ssl_session_cache shared:SSL:50m;
+        ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
+        #ssl_prefer_server_ciphers off;
+        ssl_prefer_server_ciphers on;
+        ssl_protocols TLSv1.2 TLSv1.3;
+
+
+        ##############################
+
+        include /data/nginx/conf/vhost8088/*.conf;
+        client_max_body_size 1024m;
+        client_body_buffer_size 512k;
+        client_header_timeout 3m;
+        send_timeout 3m;
+        proxy_connect_timeout 600;
+        proxy_read_timeout 600;
+        proxy_send_timeout 600;
+### Return a custom 403 page for blocked IPs #########
+        error_page 403 /error.html;
+
+        location = /error.html {
+            default_type text/plain;
+            return 403 "Access failed. Please contact the administrator to add the IP whitelist IP:$remote_addr";
+        }
+
+    }
+
+
+ #K8S DashBoard
+# server {
+# listen 30554 ssl;
+# ssl_certificate /data/nginx/conf/zyjctech.com_cert_chain.pem;
+# ssl_certificate_key /data/nginx/conf/zyjctech.com_key.key;
+# ssl_session_timeout 5m;
+
+# ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
+# ssl_prefer_server_ciphers off;
+# ssl_protocols TLSv1.2 TLSv1.3;
+
+# client_max_body_size 1024m;
+# client_body_buffer_size 256k;
+# client_header_timeout 3m;
+# client_body_timeout 3m;
+# send_timeout 3m;
+
+# proxy_connect_timeout 600;
+# proxy_read_timeout 600;
+# proxy_send_timeout 600;
+# proxy_buffer_size 256k;
+# proxy_buffers 4 256k;
+# proxy_busy_buffers_size 256k;
+
+# location / {
+# proxy_pass https://127.0.0.1:32000;
+# proxy_set_header Host $host;
+# proxy_set_header X-Real-IP $remote_addr;
+# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+# }
+# }
+
+
+## minio控制台 ##
+
+# server {
+# listen 32002; #或者用80端口也可以
+# server_name 188.106.25.136; #可以用域名
+# add_header X-Frame-Options SAMEORIGIN always;
+# add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
+# location / {
+# proxy_next_upstream http_500 http_502 http_503 http_504 error timeout invalid_header;
+# proxy_set_header Host $http_host;
+# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+# proxy_pass http://188.106.25.132:9001;
+# expires 0;
+# }
+# }
+
+## AiMapServer ##
+# server {
+# listen 32007; #或者用80端口也可以
+# server_name 188.106.25.136; #可以用域名
+# add_header X-Frame-Options SAMEORIGIN always;
+# add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
+
+# location / {
+# proxy_pass http://188.106.25.222:5090/aimap-server/manager/login;
+# proxy_set_header Host $host;
+# proxy_set_header X-Real-IP $remote_addr;
+# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+# }
+# location /aimap-server {
+# proxy_pass http://188.106.25.222:5090/aimap-server;
+# proxy_set_header Host $host;
+# proxy_set_header X-Real-IP $remote_addr;
+# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+# }
+# location /gisapi {
+# proxy_pass http://188.106.25.222:5090/gisapi;
+# proxy_set_header Host $host;
+# proxy_set_header X-Real-IP $remote_addr;
+# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+# }
+# }
+
+
+
+ }
diff --git a/66-202505-浙江二级监管/已有部署备份/主机授权文件.json b/66-202505-浙江二级监管/已有部署备份/主机授权文件.json
new file mode 100644
index 0000000..3788dcc
--- /dev/null
+++ b/66-202505-浙江二级监管/已有部署备份/主机授权文件.json
@@ -0,0 +1,1350 @@
+{
+ "encrypted_host_map": {
+ "7373iZoMo4A9WiqUGnbCb4fltTV2Dac4AfVG8yNgCymUnYW4DV/Q9K0WJMcM4s+w+rCwAJpP7TRDDr057hLeu6ugH5QCteps02Og1vDJlmzFmEDquHYa3CfiIbTpA8gER81rePTvreLggr8XPHw51FC/PmhXzSQLJwNEiqH5rigIn394qxDcgh3XTsmPW0nHF0slIHhiYcHrg6vQXK1OesLzOl1K/hXKR7f9hos265CEQwtHrZLXvqqgEsxy+jSPQZSkCFPzV3iReZ7DaouUdk2P92Q3V2lWBJgTvDVwCabGzuJaNpMOZh1Ug1Js+OoNSJRmAT3Bo31nmMmEDFmUzW7G3/ep1yscAqHkAXglH3XywDlLaV+AtBouuIZ/RUi/KdVYj3cY+ksyTegj8GMzxfT2xLKxbTHQobPdiz4hCZ+8xouQKl+gsBtSGF7BNKRFslISgqEt+ac=": {
+ "system_info": {
+ "machine_id": "a02a2142-c9fe-9223-2a2a-22e0a9e4e0c7",
+ "machine_serial": "VMware-42 21 2a a0 fe c9 23 92-2a 2a 22 e0 a9 e4 e0 c7",
+ "os": {
+ "name": "Debian GNU/Linux",
+ "version": "12 (bookworm)",
+ "id": "debian",
+ "id_like": "unknown",
+ "version_id": "12",
+ "pretty_name": "Debian GNU/Linux 12 (bookworm)"
+ },
+ "kernel_version": "Linux version 5.10.0-60.18.0.50.oe2203.x86_64 (abuild@ecs-obsworker-209) (gcc_old (GCC) 10.3.1, GNU ld (GNU Binutils) 2.37) #1 SMP Wed Mar 30 03:12:24 UTC 2022",
+ "node_name": "192.168.10.23",
+ "node_ip": "192.168.10.23"
+ },
+ "cpu_info": {
+ "model_name": "Intel(R) Xeon(R) Gold 6248R CPU @ 3.00GHz",
+ "cores": 1,
+ "architecture": "amd64",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "nopl",
+ "xtopology",
+ "tsc_reliable",
+ "nonstop_tsc",
+ "cpuid",
+ "pni",
+ "pclmulqdq",
+ "ssse3",
+ "fma",
+ "cx16",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "hypervisor",
+ "lahf_lm",
+ "abm",
+ "3dnowprefetch",
+ "invpcid_single",
+ "ssbd",
+ "ibrs",
+ "ibpb",
+ "stibp",
+ "ibrs_enhanced",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "invpcid",
+ "avx512f",
+ "avx512dq",
+ "rdseed",
+ "adx",
+ "smap",
+ "clflushopt",
+ "clwb",
+ "avx512cd",
+ "avx512bw",
+ "avx512vl",
+ "xsaveopt",
+ "xsavec",
+ "xgetbv1",
+ "xsaves",
+ "arat",
+ "pku",
+ "ospke",
+ "avx512_vnni",
+ "md_clear",
+ "flush_l1d",
+ "arch_capabilities"
+ ],
+ "hypervisor": "present",
+ "virtualization": "unknown",
+ "frequency": "3.00GHz"
+ },
+ "disk_info": [
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/fs/cgroup",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 8864174080,
+ "available": 27804037120,
+ "use_percent": "24.2%",
+ "mount_point": "/dev/termination-log",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "filesystem": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "type": "nfs4",
+ "size": 3219645267968,
+ "used": 485732909056,
+ "available": 2733912358912,
+ "use_percent": "15.1%",
+ "mount_point": "/cmii/logs",
+ "physical_device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 8687169536,
+ "available": 527920553984,
+ "use_percent": "1.6%",
+ "mount_point": "/etc/resolv.conf",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 8687169536,
+ "available": 527920553984,
+ "use_percent": "1.6%",
+ "mount_point": "/etc/hostname",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 8864174080,
+ "available": 27804037120,
+ "use_percent": "24.2%",
+ "mount_point": "/etc/hosts",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "shm",
+ "filesystem": "shm",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev/shm",
+ "physical_device": "shm",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 12288,
+ "available": 16547827712,
+ "use_percent": "0.0%",
+ "mount_point": "/run/secrets/kubernetes.io/serviceaccount",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/acpi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/kcore",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/keys",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/timer_list",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/sched_debug",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/scsi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/firmware",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ }
+ ],
+ "memory_info": {
+ "total": 32320000,
+ "free": 22647696,
+ "available": 27304292,
+ "used": 4719704,
+ "buffers": 27984,
+ "cached": 4906172,
+ "shared": 18444
+ },
+ "net_info": [
+ {
+ "name": "eth0",
+ "mac_address": "52:d7:06:c9:11:79",
+ "ip_addresses": [
+ "172.28.10.15"
+ ]
+ }
+ ],
+ "motherboard_info": {
+ "manufacturer": "",
+ "product": "",
+ "version": "",
+ "serial": ""
+ }
+ },
+ "OihHvvC+9aSvuyCLHwp7JKxc32Vb2NFYmXIvYscO99UjUfHsyJKe0iMCmJqbfgycOtCgQrnDsUMEPjA3C9h6wC3Sp5+hxf7Vv6KeXWnhZDub0eEpSFa69O1GsKaPurWAHcE8M1kBwjrI4jBh92aXOlVSwYR+mxYijuqxW6xpyF9miM8ClYKclYORHWNGFjReZOposZnYYHQEf+la3ATmONb1KnkThTI2odcLlIMm10eF6vXl7/36Rr0e834I4yl9oivGaNIoPKWhMYPvk+MvrvKLC74R3cPV6uJinFQVvGikUzJ21zcrpbZHK7sdbkz/Ot9deBpbIIU1Wpw+coWVzO5sKigpMvEYgLWel3Ar3/mBg1/JvJA+vDR7nNACF4yDhMS0OgRiJ+TQC+BTz+b03Mp7XU3ZiAYf49u0ctwCAOkCxh5HGVoBZMUwrDR594/2EqRJ93AnD2k=": {
+ "system_info": {
+ "machine_id": "ba382142-afc3-9d98-6356-b9c37ce20c80",
+ "machine_serial": "VMware-42 21 38 ba c3 af 98 9d-63 56 b9 c3 7c e2 0c 80",
+ "os": {
+ "name": "Debian GNU/Linux",
+ "version": "12 (bookworm)",
+ "id": "debian",
+ "id_like": "unknown",
+ "version_id": "12",
+ "pretty_name": "Debian GNU/Linux 12 (bookworm)"
+ },
+ "kernel_version": "Linux version 5.10.0-60.18.0.50.oe2203.x86_64 (abuild@ecs-obsworker-209) (gcc_old (GCC) 10.3.1, GNU ld (GNU Binutils) 2.37) #1 SMP Wed Mar 30 03:12:24 UTC 2022",
+ "node_name": "192.168.10.20",
+ "node_ip": "192.168.10.20"
+ },
+ "cpu_info": {
+ "model_name": "Intel(R) Xeon(R) Gold 6248R CPU @ 3.00GHz",
+ "cores": 1,
+ "architecture": "amd64",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "nopl",
+ "xtopology",
+ "tsc_reliable",
+ "nonstop_tsc",
+ "cpuid",
+ "pni",
+ "pclmulqdq",
+ "ssse3",
+ "fma",
+ "cx16",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "hypervisor",
+ "lahf_lm",
+ "abm",
+ "3dnowprefetch",
+ "invpcid_single",
+ "ssbd",
+ "ibrs",
+ "ibpb",
+ "stibp",
+ "ibrs_enhanced",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "invpcid",
+ "avx512f",
+ "avx512dq",
+ "rdseed",
+ "adx",
+ "smap",
+ "clflushopt",
+ "clwb",
+ "avx512cd",
+ "avx512bw",
+ "avx512vl",
+ "xsaveopt",
+ "xsavec",
+ "xgetbv1",
+ "xsaves",
+ "arat",
+ "pku",
+ "ospke",
+ "avx512_vnni",
+ "md_clear",
+ "flush_l1d",
+ "arch_capabilities"
+ ],
+ "hypervisor": "present",
+ "virtualization": "unknown",
+ "frequency": "3.00GHz"
+ },
+ "disk_info": [
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/fs/cgroup",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "filesystem": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "type": "nfs4",
+ "size": 3219645267968,
+ "used": 485732909056,
+ "available": 2733912358912,
+ "use_percent": "15.1%",
+ "mount_point": "/cmii/logs",
+ "physical_device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 9302913024,
+ "available": 27365298176,
+ "use_percent": "25.4%",
+ "mount_point": "/dev/termination-log",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 13070065664,
+ "available": 523537657856,
+ "use_percent": "2.4%",
+ "mount_point": "/etc/resolv.conf",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 13070065664,
+ "available": 523537657856,
+ "use_percent": "2.4%",
+ "mount_point": "/etc/hostname",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 9302913024,
+ "available": 27365298176,
+ "use_percent": "25.4%",
+ "mount_point": "/etc/hosts",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "shm",
+ "filesystem": "shm",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev/shm",
+ "physical_device": "shm",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 12288,
+ "available": 16547827712,
+ "use_percent": "0.0%",
+ "mount_point": "/run/secrets/kubernetes.io/serviceaccount",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/acpi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/kcore",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/keys",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/timer_list",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/sched_debug",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/scsi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/firmware",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ }
+ ],
+ "memory_info": {
+ "total": 32320000,
+ "free": 23211164,
+ "available": 29335132,
+ "used": 2755132,
+ "buffers": 28384,
+ "cached": 6306952,
+ "shared": 18368
+ },
+ "net_info": [
+ {
+ "name": "eth0",
+ "mac_address": "da:73:6d:1e:79:d0",
+ "ip_addresses": [
+ "172.28.13.15"
+ ]
+ }
+ ],
+ "motherboard_info": {
+ "manufacturer": "",
+ "product": "",
+ "version": "",
+ "serial": ""
+ }
+ },
+ "tIXa7nb1GPDaYk1KAkbBq2uIReYCcnSp7w3inS9O8IIYnVaAq+W1thyWE8M0xshqjBLdcDEv6kgkHkMyCEhMTTRW9npPz1E2WOpczh4lfgkb8OcOHaZDWJ83h+PUKZWJtPztvpR4jafKDwboJbcZ2eBsSxDeqLdZpzAPWN3hp/3KYciH1hc8i2UTZfLmodTNQq1Jvja3aqt+j6F96oKrU+Jur4KjppewTYOucC+coDI7fv/GB6Qg8jcTW8ITABArsZnPH0F5/0JG70qjFFIc9AYBjQ5BMalQ6X6i0yD1hdYnar7ZYjG+v1Qfjjn0B+ayBz6fr4R9wjbhZ8D+FGkQssMYe0Kb/tnhzsgjr4O7kUzbU2m9YAuTzxCKQ8+SNlV9x79DbVCF3Ojs3XEJBQ5qVejl2yBqP1qkIO2JZI+Shns19FFgIAhqdYMeoSykV5yJ28CcFfCNi/o=": {
+ "system_info": {
+ "machine_id": "2f762142-8391-218a-7960-d2c0998fb3d8",
+ "machine_serial": "VMware-42 21 76 2f 91 83 8a 21-79 60 d2 c0 99 8f b3 d8",
+ "os": {
+ "name": "Debian GNU/Linux",
+ "version": "12 (bookworm)",
+ "id": "debian",
+ "id_like": "unknown",
+ "version_id": "12",
+ "pretty_name": "Debian GNU/Linux 12 (bookworm)"
+ },
+ "kernel_version": "Linux version 5.10.0-60.18.0.50.oe2203.x86_64 (abuild@ecs-obsworker-209) (gcc_old (GCC) 10.3.1, GNU ld (GNU Binutils) 2.37) #1 SMP Wed Mar 30 03:12:24 UTC 2022",
+ "node_name": "192.168.10.22",
+ "node_ip": "192.168.10.22"
+ },
+ "cpu_info": {
+ "model_name": "Intel(R) Xeon(R) Gold 5218R CPU @ 2.10GHz",
+ "cores": 1,
+ "architecture": "amd64",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "nopl",
+ "xtopology",
+ "tsc_reliable",
+ "nonstop_tsc",
+ "cpuid",
+ "pni",
+ "pclmulqdq",
+ "ssse3",
+ "fma",
+ "cx16",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "hypervisor",
+ "lahf_lm",
+ "abm",
+ "3dnowprefetch",
+ "invpcid_single",
+ "ssbd",
+ "ibrs",
+ "ibpb",
+ "stibp",
+ "ibrs_enhanced",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "invpcid",
+ "avx512f",
+ "avx512dq",
+ "rdseed",
+ "adx",
+ "smap",
+ "clflushopt",
+ "clwb",
+ "avx512cd",
+ "avx512bw",
+ "avx512vl",
+ "xsaveopt",
+ "xsavec",
+ "xgetbv1",
+ "xsaves",
+ "arat",
+ "pku",
+ "ospke",
+ "avx512_vnni",
+ "md_clear",
+ "flush_l1d",
+ "arch_capabilities"
+ ],
+ "hypervisor": "present",
+ "virtualization": "unknown",
+ "frequency": "2.10GHz"
+ },
+ "disk_info": [
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/fs/cgroup",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 8863657984,
+ "available": 27804553216,
+ "use_percent": "24.2%",
+ "mount_point": "/dev/termination-log",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "filesystem": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "type": "nfs4",
+ "size": 3219645267968,
+ "used": 485271535616,
+ "available": 2734373732352,
+ "use_percent": "15.1%",
+ "mount_point": "/cmii/logs",
+ "physical_device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 10849562624,
+ "available": 525758160896,
+ "use_percent": "2.0%",
+ "mount_point": "/etc/resolv.conf",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 10849562624,
+ "available": 525758160896,
+ "use_percent": "2.0%",
+ "mount_point": "/etc/hostname",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 8863657984,
+ "available": 27804553216,
+ "use_percent": "24.2%",
+ "mount_point": "/etc/hosts",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "shm",
+ "filesystem": "shm",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev/shm",
+ "physical_device": "shm",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 12288,
+ "available": 16547827712,
+ "use_percent": "0.0%",
+ "mount_point": "/run/secrets/kubernetes.io/serviceaccount",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/acpi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/kcore",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/keys",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/timer_list",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/sched_debug",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/scsi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/firmware",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ }
+ ],
+ "memory_info": {
+ "total": 32320000,
+ "free": 20713280,
+ "available": 27621416,
+ "used": 4599812,
+ "buffers": 22076,
+ "cached": 6966412,
+ "shared": 18420
+ },
+ "net_info": [
+ {
+ "name": "eth0",
+ "mac_address": "f6:8e:b0:c3:c9:20",
+ "ip_addresses": [
+ "172.28.9.18"
+ ]
+ }
+ ],
+ "motherboard_info": {
+ "manufacturer": "",
+ "product": "",
+ "version": "",
+ "serial": ""
+ }
+ },
+ "wvOm0LNCKtFxK01SCBVT7Xrz7JBcFVKMLDeXZAOcQiuXiW1lxDDYOi+bZ3KT+5IvMhAw199CqCK1/a3MOPlE8FRcuQTS97iUKaUGkPRH8S5GY2l0pFrcDsfL/tyH280GrvjNZsLDu7hJRNxi4MJ68R4bifpQGjAxKMqbVeXMDliu8Voa37DQIlVlWkd30xOZ4g/MWwDo26mkGspoipd4jQMyRy/3OITY0jfvfkVPP17NEAQbPCIx4gg4Vv0O36phOW0px51o/QAwYt2Nk5OfpKLqt7azRyR+KDKoBdTstlBFce9he6F5uwelTOiWwpsyqMwJ+0Z2fXH3cmvxbPtQ3Tpav2Q00OqSWALVMoM7y9zHIp0rzciQs/EGxADBZL+zDDflF6Y5y53/5zFUPCM9CZkQA1u4l+VQCZGm1psf9TeEthSojSPTeIOwBDafjygrESd887Hod58=": {
+ "system_info": {
+ "machine_id": "5e9b2142-b005-b081-c5ef-3b277d23f8db",
+ "machine_serial": "VMware-42 21 9b 5e 05 b0 81 b0-c5 ef 3b 27 7d 23 f8 db",
+ "os": {
+ "name": "Debian GNU/Linux",
+ "version": "12 (bookworm)",
+ "id": "debian",
+ "id_like": "unknown",
+ "version_id": "12",
+ "pretty_name": "Debian GNU/Linux 12 (bookworm)"
+ },
+ "kernel_version": "Linux version 5.10.0-60.18.0.50.oe2203.x86_64 (abuild@ecs-obsworker-209) (gcc_old (GCC) 10.3.1, GNU ld (GNU Binutils) 2.37) #1 SMP Wed Mar 30 03:12:24 UTC 2022",
+ "node_name": "192.168.10.21",
+ "node_ip": "192.168.10.21"
+ },
+ "cpu_info": {
+ "model_name": "Intel(R) Xeon(R) Gold 5218R CPU @ 2.10GHz",
+ "cores": 1,
+ "architecture": "amd64",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "nopl",
+ "xtopology",
+ "tsc_reliable",
+ "nonstop_tsc",
+ "cpuid",
+ "pni",
+ "pclmulqdq",
+ "ssse3",
+ "fma",
+ "cx16",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "hypervisor",
+ "lahf_lm",
+ "abm",
+ "3dnowprefetch",
+ "invpcid_single",
+ "ssbd",
+ "ibrs",
+ "ibpb",
+ "stibp",
+ "ibrs_enhanced",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "invpcid",
+ "avx512f",
+ "avx512dq",
+ "rdseed",
+ "adx",
+ "smap",
+ "clflushopt",
+ "clwb",
+ "avx512cd",
+ "avx512bw",
+ "avx512vl",
+ "xsaveopt",
+ "xsavec",
+ "xgetbv1",
+ "xsaves",
+ "arat",
+ "pku",
+ "ospke",
+ "avx512_vnni",
+ "md_clear",
+ "flush_l1d",
+ "arch_capabilities"
+ ],
+ "hypervisor": "present",
+ "virtualization": "unknown",
+ "frequency": "2.10GHz"
+ },
+ "disk_info": [
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/fs/cgroup",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "filesystem": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "type": "nfs4",
+ "size": 3219645267968,
+ "used": 485271535616,
+ "available": 2734373732352,
+ "use_percent": "15.1%",
+ "mount_point": "/cmii/logs",
+ "physical_device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 9424048128,
+ "available": 27244163072,
+ "use_percent": "25.7%",
+ "mount_point": "/dev/termination-log",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 8784474112,
+ "available": 527823249408,
+ "use_percent": "1.6%",
+ "mount_point": "/etc/resolv.conf",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 8784474112,
+ "available": 527823249408,
+ "use_percent": "1.6%",
+ "mount_point": "/etc/hostname",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 9424048128,
+ "available": 27244163072,
+ "use_percent": "25.7%",
+ "mount_point": "/etc/hosts",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "shm",
+ "filesystem": "shm",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev/shm",
+ "physical_device": "shm",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 12288,
+ "available": 16547827712,
+ "use_percent": "0.0%",
+ "mount_point": "/run/secrets/kubernetes.io/serviceaccount",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/acpi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/kcore",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/keys",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/timer_list",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/sched_debug",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/scsi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/firmware",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ }
+ ],
+ "memory_info": {
+ "total": 32320000,
+ "free": 21842724,
+ "available": 26601880,
+ "used": 5434316,
+ "buffers": 24408,
+ "cached": 5000160,
+ "shared": 18392
+ },
+ "net_info": [
+ {
+ "name": "eth0",
+ "mac_address": "46:bb:84:7e:f9:eb",
+ "ip_addresses": [
+ "172.28.14.14"
+ ]
+ }
+ ],
+ "motherboard_info": {
+ "manufacturer": "",
+ "product": "",
+ "version": "",
+ "serial": ""
+ }
+ }
+ },
+ "totp_code": "86395174",
+ "current_time": "2025-05-29 16:42:40",
+ "first_auth_time": "",
+ "time_offset": 0,
+ "project_namespace": "zjejpt-uas",
+ "encrypted_namespace": "QpfMkRH/0PMSFAcUJ6dv/DeE+OA+e3H/vaOAuha3zu2ybwHeTvg="
+}
\ No newline at end of file
diff --git a/66-202505-浙江二级监管/已有部署备份/授权码.json b/66-202505-浙江二级监管/已有部署备份/授权码.json
new file mode 100644
index 0000000..1a22f04
--- /dev/null
+++ b/66-202505-浙江二级监管/已有部署备份/授权码.json
@@ -0,0 +1,1348 @@
+{
+ "totp_code": "86395174",
+ "current_time": "2025-05-29 16:43:49",
+ "encrypted_host_map": {
+ "7373iZoMo4A9WiqUGnbCb4fltTV2Dac4AfVG8yNgCymUnYW4DV/Q9K0WJMcM4s+w+rCwAJpP7TRDDr057hLeu6ugH5QCteps02Og1vDJlmzFmEDquHYa3CfiIbTpA8gER81rePTvreLggr8XPHw51FC/PmhXzSQLJwNEiqH5rigIn394qxDcgh3XTsmPW0nHF0slIHhiYcHrg6vQXK1OesLzOl1K/hXKR7f9hos265CEQwtHrZLXvqqgEsxy+jSPQZSkCFPzV3iReZ7DaouUdk2P92Q3V2lWBJgTvDVwCabGzuJaNpMOZh1Ug1Js+OoNSJRmAT3Bo31nmMmEDFmUzW7G3/ep1yscAqHkAXglH3XywDlLaV+AtBouuIZ/RUi/KdVYj3cY+ksyTegj8GMzxfT2xLKxbTHQobPdiz4hCZ+8xouQKl+gsBtSGF7BNKRFslISgqEt+ac=": {
+ "system_info": {
+ "machine_id": "a02a2142-c9fe-9223-2a2a-22e0a9e4e0c7",
+ "machine_serial": "VMware-42 21 2a a0 fe c9 23 92-2a 2a 22 e0 a9 e4 e0 c7",
+ "os": {
+ "name": "Debian GNU/Linux",
+ "version": "12 (bookworm)",
+ "id": "debian",
+ "id_like": "unknown",
+ "version_id": "12",
+ "pretty_name": "Debian GNU/Linux 12 (bookworm)"
+ },
+ "kernel_version": "Linux version 5.10.0-60.18.0.50.oe2203.x86_64 (abuild@ecs-obsworker-209) (gcc_old (GCC) 10.3.1, GNU ld (GNU Binutils) 2.37) #1 SMP Wed Mar 30 03:12:24 UTC 2022",
+ "node_name": "192.168.10.23",
+ "node_ip": "192.168.10.23"
+ },
+ "cpu_info": {
+ "model_name": "Intel(R) Xeon(R) Gold 6248R CPU @ 3.00GHz",
+ "cores": 1,
+ "architecture": "amd64",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "nopl",
+ "xtopology",
+ "tsc_reliable",
+ "nonstop_tsc",
+ "cpuid",
+ "pni",
+ "pclmulqdq",
+ "ssse3",
+ "fma",
+ "cx16",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "hypervisor",
+ "lahf_lm",
+ "abm",
+ "3dnowprefetch",
+ "invpcid_single",
+ "ssbd",
+ "ibrs",
+ "ibpb",
+ "stibp",
+ "ibrs_enhanced",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "invpcid",
+ "avx512f",
+ "avx512dq",
+ "rdseed",
+ "adx",
+ "smap",
+ "clflushopt",
+ "clwb",
+ "avx512cd",
+ "avx512bw",
+ "avx512vl",
+ "xsaveopt",
+ "xsavec",
+ "xgetbv1",
+ "xsaves",
+ "arat",
+ "pku",
+ "ospke",
+ "avx512_vnni",
+ "md_clear",
+ "flush_l1d",
+ "arch_capabilities"
+ ],
+ "hypervisor": "present",
+ "virtualization": "unknown",
+ "frequency": "3.00GHz"
+ },
+ "disk_info": [
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/fs/cgroup",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 8864174080,
+ "available": 27804037120,
+ "use_percent": "24.2%",
+ "mount_point": "/dev/termination-log",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "filesystem": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "type": "nfs4",
+ "size": 3219645267968,
+ "used": 485732909056,
+ "available": 2733912358912,
+ "use_percent": "15.1%",
+ "mount_point": "/cmii/logs",
+ "physical_device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 8687169536,
+ "available": 527920553984,
+ "use_percent": "1.6%",
+ "mount_point": "/etc/resolv.conf",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 8687169536,
+ "available": 527920553984,
+ "use_percent": "1.6%",
+ "mount_point": "/etc/hostname",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 8864174080,
+ "available": 27804037120,
+ "use_percent": "24.2%",
+ "mount_point": "/etc/hosts",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "shm",
+ "filesystem": "shm",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev/shm",
+ "physical_device": "shm",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 12288,
+ "available": 16547827712,
+ "use_percent": "0.0%",
+ "mount_point": "/run/secrets/kubernetes.io/serviceaccount",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/acpi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/kcore",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/keys",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/timer_list",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/sched_debug",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/scsi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/firmware",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ }
+ ],
+ "memory_info": {
+ "total": 32320000,
+ "free": 22647696,
+ "available": 27304292,
+ "used": 4719704,
+ "buffers": 27984,
+ "cached": 4906172,
+ "shared": 18444
+ },
+ "net_info": [
+ {
+ "name": "eth0",
+ "mac_address": "52:d7:06:c9:11:79",
+ "ip_addresses": [
+ "172.28.10.15"
+ ]
+ }
+ ],
+ "motherboard_info": {
+ "manufacturer": "",
+ "product": "",
+ "version": "",
+ "serial": ""
+ }
+ },
+ "OihHvvC+9aSvuyCLHwp7JKxc32Vb2NFYmXIvYscO99UjUfHsyJKe0iMCmJqbfgycOtCgQrnDsUMEPjA3C9h6wC3Sp5+hxf7Vv6KeXWnhZDub0eEpSFa69O1GsKaPurWAHcE8M1kBwjrI4jBh92aXOlVSwYR+mxYijuqxW6xpyF9miM8ClYKclYORHWNGFjReZOposZnYYHQEf+la3ATmONb1KnkThTI2odcLlIMm10eF6vXl7/36Rr0e834I4yl9oivGaNIoPKWhMYPvk+MvrvKLC74R3cPV6uJinFQVvGikUzJ21zcrpbZHK7sdbkz/Ot9deBpbIIU1Wpw+coWVzO5sKigpMvEYgLWel3Ar3/mBg1/JvJA+vDR7nNACF4yDhMS0OgRiJ+TQC+BTz+b03Mp7XU3ZiAYf49u0ctwCAOkCxh5HGVoBZMUwrDR594/2EqRJ93AnD2k=": {
+ "system_info": {
+ "machine_id": "ba382142-afc3-9d98-6356-b9c37ce20c80",
+ "machine_serial": "VMware-42 21 38 ba c3 af 98 9d-63 56 b9 c3 7c e2 0c 80",
+ "os": {
+ "name": "Debian GNU/Linux",
+ "version": "12 (bookworm)",
+ "id": "debian",
+ "id_like": "unknown",
+ "version_id": "12",
+ "pretty_name": "Debian GNU/Linux 12 (bookworm)"
+ },
+ "kernel_version": "Linux version 5.10.0-60.18.0.50.oe2203.x86_64 (abuild@ecs-obsworker-209) (gcc_old (GCC) 10.3.1, GNU ld (GNU Binutils) 2.37) #1 SMP Wed Mar 30 03:12:24 UTC 2022",
+ "node_name": "192.168.10.20",
+ "node_ip": "192.168.10.20"
+ },
+ "cpu_info": {
+ "model_name": "Intel(R) Xeon(R) Gold 6248R CPU @ 3.00GHz",
+ "cores": 1,
+ "architecture": "amd64",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "nopl",
+ "xtopology",
+ "tsc_reliable",
+ "nonstop_tsc",
+ "cpuid",
+ "pni",
+ "pclmulqdq",
+ "ssse3",
+ "fma",
+ "cx16",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "hypervisor",
+ "lahf_lm",
+ "abm",
+ "3dnowprefetch",
+ "invpcid_single",
+ "ssbd",
+ "ibrs",
+ "ibpb",
+ "stibp",
+ "ibrs_enhanced",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "invpcid",
+ "avx512f",
+ "avx512dq",
+ "rdseed",
+ "adx",
+ "smap",
+ "clflushopt",
+ "clwb",
+ "avx512cd",
+ "avx512bw",
+ "avx512vl",
+ "xsaveopt",
+ "xsavec",
+ "xgetbv1",
+ "xsaves",
+ "arat",
+ "pku",
+ "ospke",
+ "avx512_vnni",
+ "md_clear",
+ "flush_l1d",
+ "arch_capabilities"
+ ],
+ "hypervisor": "present",
+ "virtualization": "unknown",
+ "frequency": "3.00GHz"
+ },
+ "disk_info": [
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/fs/cgroup",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "filesystem": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "type": "nfs4",
+ "size": 3219645267968,
+ "used": 485732909056,
+ "available": 2733912358912,
+ "use_percent": "15.1%",
+ "mount_point": "/cmii/logs",
+ "physical_device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 9302913024,
+ "available": 27365298176,
+ "use_percent": "25.4%",
+ "mount_point": "/dev/termination-log",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 13070065664,
+ "available": 523537657856,
+ "use_percent": "2.4%",
+ "mount_point": "/etc/resolv.conf",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 13070065664,
+ "available": 523537657856,
+ "use_percent": "2.4%",
+ "mount_point": "/etc/hostname",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 9302913024,
+ "available": 27365298176,
+ "use_percent": "25.4%",
+ "mount_point": "/etc/hosts",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "shm",
+ "filesystem": "shm",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev/shm",
+ "physical_device": "shm",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 12288,
+ "available": 16547827712,
+ "use_percent": "0.0%",
+ "mount_point": "/run/secrets/kubernetes.io/serviceaccount",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/acpi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/kcore",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/keys",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/timer_list",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/sched_debug",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/scsi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/firmware",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ }
+ ],
+ "memory_info": {
+ "total": 32320000,
+ "free": 23211164,
+ "available": 29335132,
+ "used": 2755132,
+ "buffers": 28384,
+ "cached": 6306952,
+ "shared": 18368
+ },
+ "net_info": [
+ {
+ "name": "eth0",
+ "mac_address": "da:73:6d:1e:79:d0",
+ "ip_addresses": [
+ "172.28.13.15"
+ ]
+ }
+ ],
+ "motherboard_info": {
+ "manufacturer": "",
+ "product": "",
+ "version": "",
+ "serial": ""
+ }
+ },
+ "tIXa7nb1GPDaYk1KAkbBq2uIReYCcnSp7w3inS9O8IIYnVaAq+W1thyWE8M0xshqjBLdcDEv6kgkHkMyCEhMTTRW9npPz1E2WOpczh4lfgkb8OcOHaZDWJ83h+PUKZWJtPztvpR4jafKDwboJbcZ2eBsSxDeqLdZpzAPWN3hp/3KYciH1hc8i2UTZfLmodTNQq1Jvja3aqt+j6F96oKrU+Jur4KjppewTYOucC+coDI7fv/GB6Qg8jcTW8ITABArsZnPH0F5/0JG70qjFFIc9AYBjQ5BMalQ6X6i0yD1hdYnar7ZYjG+v1Qfjjn0B+ayBz6fr4R9wjbhZ8D+FGkQssMYe0Kb/tnhzsgjr4O7kUzbU2m9YAuTzxCKQ8+SNlV9x79DbVCF3Ojs3XEJBQ5qVejl2yBqP1qkIO2JZI+Shns19FFgIAhqdYMeoSykV5yJ28CcFfCNi/o=": {
+ "system_info": {
+ "machine_id": "2f762142-8391-218a-7960-d2c0998fb3d8",
+ "machine_serial": "VMware-42 21 76 2f 91 83 8a 21-79 60 d2 c0 99 8f b3 d8",
+ "os": {
+ "name": "Debian GNU/Linux",
+ "version": "12 (bookworm)",
+ "id": "debian",
+ "id_like": "unknown",
+ "version_id": "12",
+ "pretty_name": "Debian GNU/Linux 12 (bookworm)"
+ },
+ "kernel_version": "Linux version 5.10.0-60.18.0.50.oe2203.x86_64 (abuild@ecs-obsworker-209) (gcc_old (GCC) 10.3.1, GNU ld (GNU Binutils) 2.37) #1 SMP Wed Mar 30 03:12:24 UTC 2022",
+ "node_name": "192.168.10.22",
+ "node_ip": "192.168.10.22"
+ },
+ "cpu_info": {
+ "model_name": "Intel(R) Xeon(R) Gold 5218R CPU @ 2.10GHz",
+ "cores": 1,
+ "architecture": "amd64",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "nopl",
+ "xtopology",
+ "tsc_reliable",
+ "nonstop_tsc",
+ "cpuid",
+ "pni",
+ "pclmulqdq",
+ "ssse3",
+ "fma",
+ "cx16",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "hypervisor",
+ "lahf_lm",
+ "abm",
+ "3dnowprefetch",
+ "invpcid_single",
+ "ssbd",
+ "ibrs",
+ "ibpb",
+ "stibp",
+ "ibrs_enhanced",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "invpcid",
+ "avx512f",
+ "avx512dq",
+ "rdseed",
+ "adx",
+ "smap",
+ "clflushopt",
+ "clwb",
+ "avx512cd",
+ "avx512bw",
+ "avx512vl",
+ "xsaveopt",
+ "xsavec",
+ "xgetbv1",
+ "xsaves",
+ "arat",
+ "pku",
+ "ospke",
+ "avx512_vnni",
+ "md_clear",
+ "flush_l1d",
+ "arch_capabilities"
+ ],
+ "hypervisor": "present",
+ "virtualization": "unknown",
+ "frequency": "2.10GHz"
+ },
+ "disk_info": [
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/fs/cgroup",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 8863657984,
+ "available": 27804553216,
+ "use_percent": "24.2%",
+ "mount_point": "/dev/termination-log",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "filesystem": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "type": "nfs4",
+ "size": 3219645267968,
+ "used": 485271535616,
+ "available": 2734373732352,
+ "use_percent": "15.1%",
+ "mount_point": "/cmii/logs",
+ "physical_device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 10849562624,
+ "available": 525758160896,
+ "use_percent": "2.0%",
+ "mount_point": "/etc/resolv.conf",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 10849562624,
+ "available": 525758160896,
+ "use_percent": "2.0%",
+ "mount_point": "/etc/hostname",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 8863657984,
+ "available": 27804553216,
+ "use_percent": "24.2%",
+ "mount_point": "/etc/hosts",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "shm",
+ "filesystem": "shm",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev/shm",
+ "physical_device": "shm",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 12288,
+ "available": 16547827712,
+ "use_percent": "0.0%",
+ "mount_point": "/run/secrets/kubernetes.io/serviceaccount",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/acpi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/kcore",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/keys",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/timer_list",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/sched_debug",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/scsi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/firmware",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ }
+ ],
+ "memory_info": {
+ "total": 32320000,
+ "free": 20713280,
+ "available": 27621416,
+ "used": 4599812,
+ "buffers": 22076,
+ "cached": 6966412,
+ "shared": 18420
+ },
+ "net_info": [
+ {
+ "name": "eth0",
+ "mac_address": "f6:8e:b0:c3:c9:20",
+ "ip_addresses": [
+ "172.28.9.18"
+ ]
+ }
+ ],
+ "motherboard_info": {
+ "manufacturer": "",
+ "product": "",
+ "version": "",
+ "serial": ""
+ }
+ },
+ "wvOm0LNCKtFxK01SCBVT7Xrz7JBcFVKMLDeXZAOcQiuXiW1lxDDYOi+bZ3KT+5IvMhAw199CqCK1/a3MOPlE8FRcuQTS97iUKaUGkPRH8S5GY2l0pFrcDsfL/tyH280GrvjNZsLDu7hJRNxi4MJ68R4bifpQGjAxKMqbVeXMDliu8Voa37DQIlVlWkd30xOZ4g/MWwDo26mkGspoipd4jQMyRy/3OITY0jfvfkVPP17NEAQbPCIx4gg4Vv0O36phOW0px51o/QAwYt2Nk5OfpKLqt7azRyR+KDKoBdTstlBFce9he6F5uwelTOiWwpsyqMwJ+0Z2fXH3cmvxbPtQ3Tpav2Q00OqSWALVMoM7y9zHIp0rzciQs/EGxADBZL+zDDflF6Y5y53/5zFUPCM9CZkQA1u4l+VQCZGm1psf9TeEthSojSPTeIOwBDafjygrESd887Hod58=": {
+ "system_info": {
+ "machine_id": "5e9b2142-b005-b081-c5ef-3b277d23f8db",
+ "machine_serial": "VMware-42 21 9b 5e 05 b0 81 b0-c5 ef 3b 27 7d 23 f8 db",
+ "os": {
+ "name": "Debian GNU/Linux",
+ "version": "12 (bookworm)",
+ "id": "debian",
+ "id_like": "unknown",
+ "version_id": "12",
+ "pretty_name": "Debian GNU/Linux 12 (bookworm)"
+ },
+ "kernel_version": "Linux version 5.10.0-60.18.0.50.oe2203.x86_64 (abuild@ecs-obsworker-209) (gcc_old (GCC) 10.3.1, GNU ld (GNU Binutils) 2.37) #1 SMP Wed Mar 30 03:12:24 UTC 2022",
+ "node_name": "192.168.10.21",
+ "node_ip": "192.168.10.21"
+ },
+ "cpu_info": {
+ "model_name": "Intel(R) Xeon(R) Gold 5218R CPU @ 2.10GHz",
+ "cores": 1,
+ "architecture": "amd64",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "nopl",
+ "xtopology",
+ "tsc_reliable",
+ "nonstop_tsc",
+ "cpuid",
+ "pni",
+ "pclmulqdq",
+ "ssse3",
+ "fma",
+ "cx16",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "hypervisor",
+ "lahf_lm",
+ "abm",
+ "3dnowprefetch",
+ "invpcid_single",
+ "ssbd",
+ "ibrs",
+ "ibpb",
+ "stibp",
+ "ibrs_enhanced",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "invpcid",
+ "avx512f",
+ "avx512dq",
+ "rdseed",
+ "adx",
+ "smap",
+ "clflushopt",
+ "clwb",
+ "avx512cd",
+ "avx512bw",
+ "avx512vl",
+ "xsaveopt",
+ "xsavec",
+ "xgetbv1",
+ "xsaves",
+ "arat",
+ "pku",
+ "ospke",
+ "avx512_vnni",
+ "md_clear",
+ "flush_l1d",
+ "arch_capabilities"
+ ],
+ "hypervisor": "present",
+ "virtualization": "unknown",
+ "frequency": "2.10GHz"
+ },
+ "disk_info": [
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/fs/cgroup",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "filesystem": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "type": "nfs4",
+ "size": 3219645267968,
+ "used": 485271535616,
+ "available": 2734373732352,
+ "use_percent": "15.1%",
+ "mount_point": "/cmii/logs",
+ "physical_device": "192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uas-gateway",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 9424048128,
+ "available": 27244163072,
+ "use_percent": "25.7%",
+ "mount_point": "/dev/termination-log",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 8784474112,
+ "available": 527823249408,
+ "use_percent": "1.6%",
+ "mount_point": "/etc/resolv.conf",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/sdb1",
+ "filesystem": "/dev/sdb1",
+ "type": "xfs",
+ "size": 536607723520,
+ "used": 8784474112,
+ "available": 527823249408,
+ "use_percent": "1.6%",
+ "mount_point": "/etc/hostname",
+ "physical_device": "/dev/sdb1",
+ "physical_size": 0
+ },
+ {
+ "device": "/dev/mapper/openeuler-root",
+ "filesystem": "/dev/mapper/openeuler-root",
+ "type": "ext4",
+ "size": 36668211200,
+ "used": 9424048128,
+ "available": 27244163072,
+ "use_percent": "25.7%",
+ "mount_point": "/etc/hosts",
+ "physical_device": "/dev/mapper/openeuler-root",
+ "physical_size": 0
+ },
+ {
+ "device": "shm",
+ "filesystem": "shm",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/dev/shm",
+ "physical_device": "shm",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 12288,
+ "available": 16547827712,
+ "use_percent": "0.0%",
+ "mount_point": "/run/secrets/kubernetes.io/serviceaccount",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/acpi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/kcore",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/keys",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/timer_list",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 67108864,
+ "used": 0,
+ "available": 67108864,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/sched_debug",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/proc/scsi",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ },
+ {
+ "device": "tmpfs",
+ "filesystem": "tmpfs",
+ "type": "tmpfs",
+ "size": 16547840000,
+ "used": 0,
+ "available": 16547840000,
+ "use_percent": "0.0%",
+ "mount_point": "/sys/firmware",
+ "physical_device": "tmpfs",
+ "physical_size": 0
+ }
+ ],
+ "memory_info": {
+ "total": 32320000,
+ "free": 21842724,
+ "available": 26601880,
+ "used": 5434316,
+ "buffers": 24408,
+ "cached": 5000160,
+ "shared": 18392
+ },
+ "net_info": [
+ {
+ "name": "eth0",
+ "mac_address": "46:bb:84:7e:f9:eb",
+ "ip_addresses": [
+ "172.28.14.14"
+ ]
+ }
+ ],
+ "motherboard_info": {
+ "manufacturer": "",
+ "product": "",
+ "version": "",
+ "serial": ""
+ }
+ }
+ },
+ "project_namespace": "zjejpt-uas",
+ "encrypted_namespace": "QpfMkRH/0PMSFAcUJ6dv/DeE+OA+e3H/vaOAuha3zu2ybwHeTvg="
+}
\ No newline at end of file
diff --git a/66-202505-浙江二级监管/部署文件/k8s-backend.yaml b/66-202505-浙江二级监管/部署文件/k8s-backend.yaml
new file mode 100644
index 0000000..df627e1
--- /dev/null
+++ b/66-202505-浙江二级监管/部署文件/k8s-backend.yaml
@@ -0,0 +1,1400 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uas-gateway
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-gateway
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 2
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uas-gateway
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-gateway
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - zjejpt-uas
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uas-gateway
+ image: 192.168.10.3:8033/cmii/cmii-uas-gateway:2.1-demo-20250527-licence
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: zjejpt-uas
+ - name: APPLICATION_NAME
+ value: cmii-uas-gateway
+ - name: CUST_JAVA_OPTS
+ value: "-Xms2000m -Xmx5500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos.zjyd.svc.cluster.local:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.1
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.1
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.10.3:8033/cmii/cmii-uas-gateway:2.1-demo-20250527-licence
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-gateway
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-gateway
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-gateway
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-gateway
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 6Gi
+ cpu: "4"
+ requests:
+ memory: "1Gi"
+ cpu: "2"
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: zjejpt-uas/cmii-uas-gateway
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uas-gateway
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-gateway
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uas-gateway
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-notice
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-notice
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uav-notice
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-notice
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - zjejpt-uas
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-notice
+ image: 192.168.10.3:8033/cmii/cmii-uav-notice:pro-6.0.8
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: zjejpt-uas
+ - name: APPLICATION_NAME
+ value: cmii-uav-notice
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos.zjyd.svc.cluster.local:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.1
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.1
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.10.3:8033/cmii/cmii-uav-notice:pro-6.0.8
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-notice
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-notice
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-notice
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-notice
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 1Gi
+ cpu: "2"
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: zjejpt-uas/cmii-uav-notice
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-notice
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-notice
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uav-notice
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-material-warehouse
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-material-warehouse
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uav-material-warehouse
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-material-warehouse
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - zjejpt-uas
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-material-warehouse
+ image: 192.168.10.3:8033/cmii/cmii-uav-material-warehouse:6.2.0-050701
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: zjejpt-uas
+ - name: APPLICATION_NAME
+ value: cmii-uav-material-warehouse
+ - name: CUST_JAVA_OPTS
+ value: "-Xms1000m -Xmx5500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos.zjyd.svc.cluster.local:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.1
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.1
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.10.3:8033/cmii/cmii-uav-material-warehouse:6.2.0-050701
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-material-warehouse
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-material-warehouse
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-material-warehouse
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-material-warehouse
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 6Gi
+ cpu: "4"
+ requests:
+ memory: "1Gi"
+ cpu: "2"
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: zjejpt-uas/cmii-uav-material-warehouse
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-material-warehouse
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-material-warehouse
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uav-material-warehouse
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-sense-adapter
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-sense-adapter
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uav-sense-adapter
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-sense-adapter
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - zjejpt-uas
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-sense-adapter
+ image: 192.168.10.3:8033/cmii/cmii-uav-sense-adapter:6.2.0-250415
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: zjejpt-uas
+ - name: APPLICATION_NAME
+ value: cmii-uav-sense-adapter
+ - name: CUST_JAVA_OPTS
+ value: "-Xms1000m -Xmx5500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos.zjyd.svc.cluster.local:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.1
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.1
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.10.3:8033/cmii/cmii-uav-sense-adapter:6.2.0-250415
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-sense-adapter
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-sense-adapter
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-sense-adapter
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-sense-adapter
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 6Gi
+ cpu: "6"
+ requests:
+ memory: "1Gi"
+ cpu: "2"
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: zjejpt-uas/cmii-uav-sense-adapter
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-sense-adapter
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-sense-adapter
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uav-sense-adapter
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uas-lifecycle
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 2
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - zjejpt-uas
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uas-lifecycle
+ image: 192.168.10.3:8033/cmii/cmii-uas-lifecycle:2.1-demo-20250527-licence
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: zjejpt-uas
+ - name: APPLICATION_NAME
+ value: cmii-uas-lifecycle
+ - name: CUST_JAVA_OPTS
+ value: "-Xms1000m -Xmx5500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos.zjyd.svc.cluster.local:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.1
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.1
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.10.3:8033/cmii/cmii-uas-lifecycle:2.1-demo-20250527-licence
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 6Gi
+ cpu: "6"
+ requests:
+ memory: "1Gi"
+ cpu: "2"
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: zjejpt-uas/cmii-uas-lifecycle
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uas-lifecycle
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uavms-pyfusion
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uavms-pyfusion
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uavms-pyfusion
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uavms-pyfusion
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - zjejpt-uas
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uavms-pyfusion
+ image: 192.168.10.3:8033/cmii/cmii-uavms-pyfusion:6.3.6
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: zjejpt-uas
+ - name: APPLICATION_NAME
+ value: cmii-uavms-pyfusion
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos.zjyd.svc.cluster.local:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.1
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.1
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.10.3:8033/cmii/cmii-uavms-pyfusion:6.3.6
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uavms-pyfusion
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uavms-pyfusion
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uavms-pyfusion
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uavms-pyfusion
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 6Gi
+ cpu: "4"
+ requests:
+ memory: "1Gi"
+ cpu: "2"
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: zjejpt-uas/cmii-uavms-pyfusion
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uavms-pyfusion
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uavms-pyfusion
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uavms-pyfusion
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-iot-dispatcher
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-iot-dispatcher
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uav-iot-dispatcher
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-iot-dispatcher
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - zjejpt-uas
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-iot-dispatcher
+ image: 192.168.10.3:8033/cmii/cmii-uav-iot-dispatcher:6.2.0-focus
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: zjejpt-uas
+ - name: APPLICATION_NAME
+ value: cmii-uav-iot-dispatcher
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos.zjyd.svc.cluster.local:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.1
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.1
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.10.3:8033/cmii/cmii-uav-iot-dispatcher:6.2.0-focus
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-iot-dispatcher
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-iot-dispatcher
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-iot-dispatcher
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-iot-dispatcher
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 6Gi
+ cpu: "4"
+ requests:
+ memory: "1Gi"
+ cpu: "2"
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: zjejpt-uas/cmii-uav-iot-dispatcher
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-iot-dispatcher
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-iot-dispatcher
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uav-iot-dispatcher
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-watchdog
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-watchdog
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uav-watchdog
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-watchdog
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - zjejpt-uas
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-watchdog
+ image: 192.168.10.3:8033/cmii/cmii-uav-watchdog:1.0
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: zjejpt-uas
+ - name: APPLICATION_NAME
+ value: cmii-uav-watchdog
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos.zjyd.svc.cluster.local:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.1
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.1
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.10.3:8033/cmii/cmii-uav-watchdog:1.0
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-watchdog
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-watchdog
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-watchdog
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-watchdog
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 1Gi
+ cpu: "2"
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: zjejpt-uas/cmii-uav-watchdog
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-watchdog
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-watchdog
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uav-watchdog
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
diff --git a/66-202505-浙江二级监管/部署文件/k8s-configmap.yaml b/66-202505-浙江二级监管/部署文件/k8s-configmap.yaml
new file mode 100644
index 0000000..4d99b40
--- /dev/null
+++ b/66-202505-浙江二级监管/部署文件/k8s-configmap.yaml
@@ -0,0 +1,644 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-splice
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "splice",
+ AppClientId: "APP_zE0M3sTRXrCIJS8Y"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-traffic
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "traffic",
+ AppClientId: "APP_Jc8i2wOQ1t73QEJS"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smauth
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "smauth",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-pangu
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-open
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "open",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-jiangsuwenlv
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "jiangsuwenlv",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qingdao
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "qingdao",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uasms
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "uasms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dispatchh5
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "dispatchh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-base
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "base",
+ AppClientId: "APP_9LY41OaKSqk2btY0"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-detection
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "detection",
+ AppClientId: "APP_FDHW2VLVDWPnnOCy"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-securityh5
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "securityh5",
+ AppClientId: "APP_N3ImO0Ubfu9peRHD"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-seniclive
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "seniclive",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qinghaitourism
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "qinghaitourism",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-scanner
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "scanner",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-ai-brain
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "ai-brain",
+ AppClientId: "APP_rafnuCAmBESIVYMH"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-cmsportal
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "cmsportal",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-threedsimulation
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "threedsimulation",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uas
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "uas",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dikongzhixingh5
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "dikongzhixingh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-iot
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "iot",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-security
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "security",
+ AppClientId: "APP_JUSEMc7afyWXxvE7"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hljtt
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "hljtt",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-visualization
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "visualization",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-pilot2cloud
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "pilot2cloud",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-blockchain
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "blockchain",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smsecret
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "smsecret",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-secenter
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "secenter",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uavmsmanager
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "uavmsmanager",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervisionh5
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "supervisionh5",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-armypeople
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "armypeople",
+ AppClientId: "APP_UIegse6Lfou9pO1U"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-logistics
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "logistics",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mws
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "mws",
+ AppClientId: "APP_uKniXPELlRERBBwK"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-share
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "share",
+ AppClientId: "APP_4lVSVI0ZGxTssir8"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hyper
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "hyper",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-classification
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "classification",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-renyike
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "renyike",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-emergency
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "emergency",
+ AppClientId: "APP_aGsTAY1uMZrpKdfk"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-multiterminal
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "multiterminal",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-oms
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "oms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervision
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "supervision",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-media
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "media",
+ AppClientId: "APP_4AU8lbifESQO4FD6"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-eventsh5
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "eventsh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mianyangbackend
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "mianyangbackend",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-awareness
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "awareness",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-flight-control
+ namespace: zjejpt-uas
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "lingyun.zyjctech.com:8088",
+ ApplicationShortName: "flight-control",
+ AppClientId: "empty"
+ }
diff --git a/66-202505-浙江二级监管/部署文件/k8s-frontend.yaml b/66-202505-浙江二级监管/部署文件/k8s-frontend.yaml
new file mode 100644
index 0000000..095a61e
--- /dev/null
+++ b/66-202505-浙江二级监管/部署文件/k8s-frontend.yaml
@@ -0,0 +1,203 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: nginx-cm
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: frontend
+data:
+ nginx.conf: |
+ server {
+ listen 9528;
+ server_name localhost;
+ gzip on;
+
+ location / {
+ root /home/cmii-platform/dist;
+ index index.html index.htm;
+ }
+
+ error_page 500 502 503 504 /50x.html;
+ location = /50x.html {
+ root html;
+ }
+ }
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-platform-uasms
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uasms
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uasms
+ template:
+ metadata:
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uasms
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-platform-uasms
+ image: 192.168.10.3:8033/cmii/cmii-uav-platform-uasms:2.1-demo-20250527
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: zjejpt-uas
+ - name: APPLICATION_NAME
+ value: cmii-uav-platform-uasms
+ ports:
+ - name: platform-9528
+ containerPort: 9528
+ protocol: TCP
+ resources:
+ limits:
+ cpu: "1"
+ memory: 1Gi
+ requests:
+ cpu: 50m
+ memory: 50Mi
+ volumeMounts:
+ - name: nginx-conf
+ mountPath: /etc/nginx/conf.d/nginx.conf
+ subPath: nginx.conf
+ - name: tenant-prefix
+ subPath: ingress-config.js
+ mountPath: /home/cmii-platform/dist/ingress-config.js
+ volumes:
+ - name: nginx-conf
+ configMap:
+ name: nginx-cm
+ items:
+ - key: nginx.conf
+ path: nginx.conf
+ - name: tenant-prefix
+ configMap:
+ name: tenant-prefix-uasms
+ items:
+ - key: ingress-config.js
+ path: ingress-config.js
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-platform-uasms
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uasms
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uasms
+ ports:
+ - name: web-svc-port
+ port: 9528
+ protocol: TCP
+ targetPort: 9528
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-platform-uas
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uas
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uas
+ template:
+ metadata:
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uas
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-platform-uas
+ image: 192.168.10.3:8033/cmii/cmii-uav-platform-uas:2.1-demo-20250527
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: zjejpt-uas
+ - name: APPLICATION_NAME
+ value: cmii-uav-platform-uas
+ ports:
+ - name: platform-9528
+ containerPort: 9528
+ protocol: TCP
+ resources:
+ limits:
+ cpu: "1"
+ memory: 1Gi
+ requests:
+ cpu: 50m
+ memory: 50Mi
+ volumeMounts:
+ - name: nginx-conf
+ mountPath: /etc/nginx/conf.d/nginx.conf
+ subPath: nginx.conf
+ - name: tenant-prefix
+ subPath: ingress-config.js
+ mountPath: /home/cmii-platform/dist/ingress-config.js
+ volumes:
+ - name: nginx-conf
+ configMap:
+ name: nginx-cm
+ items:
+ - key: nginx.conf
+ path: nginx.conf
+ - name: tenant-prefix
+ configMap:
+ name: tenant-prefix-uas
+ items:
+ - key: ingress-config.js
+ path: ingress-config.js
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-platform-uas
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uas
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uas
+ ports:
+ - name: web-svc-port
+ port: 9528
+ protocol: TCP
+ targetPort: 9528
diff --git a/66-202505-浙江二级监管/部署文件/k8s-ingress.yaml b/66-202505-浙江二级监管/部署文件/k8s-ingress.yaml
new file mode 100644
index 0000000..d766232
--- /dev/null
+++ b/66-202505-浙江二级监管/部署文件/k8s-ingress.yaml
@@ -0,0 +1,66 @@
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: frontend-applications-ingress
+ namespace: zjejpt-uas
+ labels:
+ type: frontend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.1
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ rewrite ^(/uas)$ $1/ redirect;
+ rewrite ^(/uasms)$ $1/ redirect;
+spec:
+ rules:
+ - host: fake-domain.zjejpt-uas.io
+ http:
+ paths:
+ - path: /uas/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uas
+ servicePort: 9528
+ - path: /uasms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uasms
+ servicePort: 9528
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: all-gateways-ingress
+ namespace: zjejpt-uas
+ labels:
+ type: api-gateway
+ octopus.control: all-ingress-config-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.1
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+      proxy_set_header Upgrade $http_upgrade;
+      proxy_set_header Connection "upgrade";
+spec:
+ rules:
+ - host: fake-domain.zjejpt-uas.io
+ http:
+ paths:
+ - path: /uas/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-gateway
+ servicePort: 8080
+ - path: /converge/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-sky-converge
+ servicePort: 8080
diff --git a/66-202505-浙江二级监管/部署文件/k8s-nfs-test.yaml b/66-202505-浙江二级监管/部署文件/k8s-nfs-test.yaml
new file mode 100644
index 0000000..342cca5
--- /dev/null
+++ b/66-202505-浙江二级监管/部署文件/k8s-nfs-test.yaml
@@ -0,0 +1,38 @@
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: test-claim-uas
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "nfs-uas-storage-class" #与nfs-StorageClass.yaml metadata.name保持一致
+spec:
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: nfs-uas-storage-class
+ resources:
+ requests:
+ storage: 1Mi
+---
+kind: Pod
+apiVersion: v1
+metadata:
+ name: test-pod-uas
+spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: test-pod
+ image: 192.168.10.3:8033/cmii/alpine:1.0.0
+ command:
+ - "/bin/sh"
+ args:
+ - "-c"
+ - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
+ volumeMounts:
+ - name: nfs-pvc
+ mountPath: "/mnt"
+ restartPolicy: "Never"
+ volumes:
+ - name: nfs-pvc
+ persistentVolumeClaim:
+        claimName: test-claim-uas #与PVC名称保持一致 (must match PVC "test-claim-uas" above)
diff --git a/66-202505-浙江二级监管/部署文件/k8s-nfs.yaml b/66-202505-浙江二级监管/部署文件/k8s-nfs.yaml
new file mode 100644
index 0000000..b3a2cce
--- /dev/null
+++ b/66-202505-浙江二级监管/部署文件/k8s-nfs.yaml
@@ -0,0 +1,114 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: nfs-client-provisioner-uas
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system #根据实际环境设定namespace,下面类同
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: nfs-client-provisioner-uas-runner
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: run-nfs-client-provisioner-uas
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner-uas
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+# name: nfs-client-provisioner-uas-runner
+ name: cluster-admin
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner-uas
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner-uas
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner-uas
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: Role
+ name: leader-locking-nfs-client-provisioner-uas
+ apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: nfs-uas-storage-class
+provisioner: cmlc-nfs-uas-storage # must match the PROVISIONER_NAME env var of the provisioner Deployment below
+parameters: {archiveOnDelete: "false"}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nfs-client-provisioner-uas
+ labels:
+ app: nfs-client-provisioner-uas
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system #与RBAC文件中的namespace保持一致
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nfs-client-provisioner-uas
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ app: nfs-client-provisioner-uas
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: nfs-client-provisioner-uas
+ containers:
+ - name: nfs-client-provisioner-uas
+ image: 192.168.10.3:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
+ volumeMounts:
+ - name: nfs-client-root
+ mountPath: /persistentvolumes
+ env:
+ - name: PROVISIONER_NAME
+ value: cmlc-nfs-uas-storage
+ - name: NFS_SERVER
+ value: 192.168.10.2
+ - name: NFS_PATH
+ value: /data/nfs_data
+ volumes:
+ - name: nfs-client-root
+ nfs:
+ server: 192.168.10.2
+ path: /data/nfs_data
diff --git a/66-202505-浙江二级监管/部署文件/k8s-pvc.yaml b/66-202505-浙江二级监管/部署文件/k8s-pvc.yaml
new file mode 100644
index 0000000..b2a46bf
--- /dev/null
+++ b/66-202505-浙江二级监管/部署文件/k8s-pvc.yaml
@@ -0,0 +1,20 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: nfs-backend-log-pvc
+ namespace: zjejpt-uas
+ labels:
+ cmii.type: middleware-base
+ cmii.app: nfs-backend-log-pvc
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: uas-2.1
+spec:
+ storageClassName: nfs-uas-storage-class
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 1000Gi
+
diff --git a/67-202508-雄安空能院/0-批量脚本.sh b/67-202508-雄安空能院/0-批量脚本.sh
new file mode 100644
index 0000000..c04886f
--- /dev/null
+++ b/67-202508-雄安空能院/0-批量脚本.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+
+host_ip_list=(192.168.0.3 192.168.0.4 192.168.0.5 192.168.0.6)
+
+for server in "${host_ip_list[@]}";do
+
+ ssh root@$server "echo yes !"
+ ssh root@$server "curl -s http://172.29.137.125"
+    ssh root@"$server" "printf '\n\n'"
+
+done
+
+
+
+
+
+
+
+# scp /usr/local/bin/agent-wdd root@"$server":/usr/local/bin/agent-wdd
+# ssh root@"$server" "chmod +x /usr/local/bin/agent-wdd"
+# ssh root@"$server" "/usr/local/bin/agent-wdd info all"
+# ssh root@"$server" "/usr/local/bin/agent-wdd base swap"
+# ssh root@"$server" "/usr/local/bin/agent-wdd base firewall"
+# ssh root@"$server" "/usr/local/bin/agent-wdd base selinux"
+# ssh root@"$server" "/usr/local/bin/agent-wdd base sysconfig"
+
+
+for server in "${host_ip_list[@]}"; do
+  ssh root@"${server}" "mkdir -p /root/wdd"
+  scp /root/wdd/disk.sh /root/wdd/docker.sh root@"${server}":/root/wdd/
+  ssh root@"${server}" "bash /root/wdd/disk.sh"
+  ssh root@"${server}" "bash /root/wdd/docker.sh"
+done
diff --git a/67-202508-雄安空能院/cluster.yaml b/67-202508-雄安空能院/cluster.yaml
new file mode 100644
index 0000000..4c4e6ea
--- /dev/null
+++ b/67-202508-雄安空能院/cluster.yaml
@@ -0,0 +1,239 @@
+nodes:
+ - address: 192.168.0.2
+ user: root
+ role:
+ - controlplane
+ - etcd
+ - worker
+ internal_address: 192.168.0.2
+ labels:
+      ingress-deploy: "true"
+ - address: 192.168.0.3
+ user: root
+ role:
+ - worker
+ internal_address: 192.168.0.3
+ labels:
+      ingress-deploy: "true"
+ uavcloud.env: xakny
+ - address: 192.168.0.4
+ user: root
+ role:
+ - worker
+ internal_address: 192.168.0.4
+ labels:
+      ingress-deploy: "true"
+ uavcloud.env: xakny
+ - address: 192.168.0.5
+ user: root
+ role:
+ - worker
+ internal_address: 192.168.0.5
+ labels:
+      ingress-deploy: "true"
+      uavcloud.env: xakny
+      mysql-deploy: "true"
+ - address: 192.168.0.6
+ user: root
+ role:
+ - worker
+ internal_address: 192.168.0.6
+ labels:
+ doris.cluster: "true"
+ minio.node: "true"
+
+
+
+authentication:
+ strategy: x509
+ sans:
+ - "192.168.0.2"
+
+private_registries:
+ - url: 192.168.0.2:8033 # 私有镜像库地址
+ user: admin
+ password: "V2ryStr@ngPss"
+ is_default: true
+
+##############################################################################
+
+# 默认值为false,如果设置为true,当发现不支持的Docker版本时,RKE不会报错
+ignore_docker_version: true
+
+# Set the name of the Kubernetes cluster
+cluster_name: rke-cluster
+
+kubernetes_version: v1.20.4-rancher1-1
+
+ssh_key_path: /root/.ssh/id_ed25519
+
+# Enable running cri-dockerd
+# Up to Kubernetes 1.23, kubelet contained code called dockershim
+# to support Docker runtime. The replacement is called cri-dockerd
+# and should be enabled if you want to keep using Docker as your
+# container runtime
+# Only available to enable in Kubernetes 1.21 and higher
+enable_cri_dockerd: true
+
+
+services:
+ etcd:
+ backup_config:
+ enabled: false
+ interval_hours: 72
+ retention: 3
+ safe_timestamp: false
+ timeout: 300
+ creation: 12h
+ extra_args:
+ election-timeout: 5000
+ heartbeat-interval: 500
+ gid: 0
+ retention: 72h
+ snapshot: false
+ uid: 0
+
+ kube-api:
+ # IP range for any services created on Kubernetes
+ # This must match the service_cluster_ip_range in kube-controller
+ service_cluster_ip_range: 172.29.0.0/16
+ # Expose a different port range for NodePort services
+ service_node_port_range: 30000-40000
+ always_pull_images: true
+ pod_security_policy: false
+ # Add additional arguments to the kubernetes API server
+ # This WILL OVERRIDE any existing defaults
+ extra_args:
+ # Enable audit log to stdout
+ audit-log-path: "-"
+ # Increase number of delete workers
+ delete-collection-workers: 3
+ # Set the level of log output to warning-level
+ v: 0
+ # Using the EventRateLimit admission control enforces a limit on the number of events
+ # that the API Server will accept in a given time period
+ # Available as of v1.0.0
+ event_rate_limit:
+ enabled: false
+ configuration:
+ apiVersion: eventratelimit.admission.k8s.io/v1alpha1
+ kind: Configuration
+ limits:
+ - type: Server
+ qps: 6000
+ burst: 30000
+ kube-controller:
+ # CIDR pool used to assign IP addresses to pods in the cluster
+ cluster_cidr: 172.28.0.0/16
+ # IP range for any services created on Kubernetes
+ # This must match the service_cluster_ip_range in kube-api
+ service_cluster_ip_range: 172.29.0.0/16
+ # Add additional arguments to the kubernetes API server
+ # This WILL OVERRIDE any existing defaults
+ extra_args:
+ # Set the level of log output to debug-level
+ v: 1
+ # Enable RotateKubeletServerCertificate feature gate
+ feature-gates: RotateKubeletServerCertificate=true
+ # Enable TLS Certificates management
+ # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
+ cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
+ cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
+ kubelet:
+ # Base domain for the cluster
+ cluster_domain: cluster.local
+ # IP address for the DNS service endpoint
+ cluster_dns_server: 172.29.0.10
+ # Fail if swap is on
+ fail_swap_on: false
+ # Set max pods to 250 instead of default 110
+ extra_binds:
+ - "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
+ extra_args:
+ max-pods: 162
+ # Optionally define additional volume binds to a service
+ scheduler:
+ extra_args:
+ # Set the level of log output to warning-level
+ v: 0
+ kubeproxy:
+ extra_args:
+ # Set the level of log output to warning-level
+ v: 0
+
+authorization:
+ mode: rbac
+
+addon_job_timeout: 30
+
+network:
+ options:
+ flannel_backend_type: host-gw
+ flannel_iface: eth0
+ flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
+ flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
+ plugin: calico
+
+# Specify network plugin-in (canal, calico, flannel, weave, or none)
+# network:
+# mtu: 1440
+# options:
+# flannel_backend_type: vxlan
+# plugin: calico
+# tolerations:
+# - key: "node.kubernetes.io/unreachable"
+# operator: "Exists"
+# effect: "NoExecute"
+# tolerationseconds: 300
+# - key: "node.kubernetes.io/not-ready"
+# operator: "Exists"
+# effect: "NoExecute"
+# tolerationseconds: 300
+
+# Specify DNS provider (coredns or kube-dns)
+dns:
+ provider: coredns
+ nodelocal:
+ # Available as of v1.1.0
+ update_strategy:
+ strategy: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 20%
+ maxSurge: 15%
+ linear_autoscaler_params:
+ cores_per_replica: 0.34
+ nodes_per_replica: 4
+ prevent_single_point_failure: true
+ min: 2
+ max: 3
+ tolerations:
+ - key: "node.kubernetes.io/unreachable"
+ operator: "Exists"
+ effect: "NoExecute"
+ tolerationseconds: 300
+ - key: "node.kubernetes.io/not-ready"
+ operator: "Exists"
+ effect: "NoExecute"
+ tolerationseconds: 300
+
+# Specify monitoring provider (metrics-server)
+monitoring:
+ provider: metrics-server
+ # Available as of v1.1.0
+ update_strategy:
+ strategy: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 8
+
+ingress:
+ provider: nginx
+ default_backend: true
+ http_port: 0
+ https_port: 0
+ extra_envs:
+ - name: TZ
+ value: Asia/Shanghai
+ node_selector:
+ ingress-deploy: true
+ options:
+ use-forwarded-headers: "true"
\ No newline at end of file
diff --git a/67-202508-雄安空能院/cmii-update.sh b/67-202508-雄安空能院/cmii-update.sh
new file mode 100644
index 0000000..917a651
--- /dev/null
+++ b/67-202508-雄安空能院/cmii-update.sh
@@ -0,0 +1,82 @@
#!/bin/bash
#
# cmii-update.sh -- download an application image archive from OSS,
# load and push it into the site-local Harbor registry, then roll the
# matching Kubernetes deployment to the new tag.
#
# Usage: ./cmii-update.sh <app>=<tag>=<date>=<build>.tar.gz

# Site-fixed registry endpoint and target namespace.
readonly harbor_host=192.168.0.2:8033
readonly namespace=xakny

# Filled in by parse_args() from the archive file name.
app_name=""
new_tag=""
+
#######################################
# Download the given archive from the OSS mirror into the current dir.
# Arguments: $1 - archive file name published under cmlc-installation/tmp/
# Exits:     233 when the name is empty or the download fails
#######################################
download_from_oss() {
  if [ "$1" == "" ]; then
    echo "no zip file in error!"
    exit 233
  fi

  echo "start to download => $1"
  # Abort on download failure instead of silently continuing with a
  # missing file (the original ignored wget's exit status).
  if ! wget "https://oss.demo.uavcmlc.com/cmlc-installation/tmp/$1"; then
    echo "download of $1 failed!" >&2
    exit 233
  fi

  echo ""
  echo ""
}
+
#######################################
# Load the image archive and push the retagged image to Harbor.
# Globals:   app_name, new_tag (read; set by parse_args), harbor_host (read)
# Arguments: $1 - image archive file (docker save tarball)
# Exits:     233 when app_name is empty, or load/push fails
#######################################
upload_image_to_harbor(){
  if [ "$app_name" == "" ]; then
    echo "app name null exit!"
    exit 233
  fi

  # A failed load must be fatal -- tagging/pushing a missing image
  # would only produce confusing follow-up errors.
  if ! docker load < "$1"; then
    echo "docker load error !"
    exit 233
  fi
  docker tag "harbor.cdcyy.com.cn/cmii/$app_name:$new_tag" "$harbor_host/cmii/$app_name:$new_tag"
  echo ""
  echo ""
  echo "upload_image_to_harbor - start to push to => $harbor_host/cmii/$app_name:$new_tag"
  # NOTE(review): the registry password is hardcoded here; it should be
  # read from a secret file/env var. --password-stdin keeps it out of
  # 'ps' output at least.
  docker login -u admin --password-stdin "$harbor_host" <<< 'V2ryStr@ngPss'
  if ! docker push "$harbor_host/cmii/$app_name:$new_tag"; then
    echo "docker push error !" >&2
    exit 233
  fi
  echo ""
  echo ""

}
+
#######################################
# Split the archive name into application name and image tag.
# Globals:   app_name, new_tag (written)
# Arguments: $1 - archive named <app>=<tag>=<date>=<build>.tar.gz
# Exits:     233 when the name is empty
#######################################
parse_args(){
  if [ "$1" == "" ]; then
    echo "no zip file in error!"
    exit 233
  fi
  local image_name="$1"

  # e.g. cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
  # Parameter expansion replaces the original unquoted 'echo | cut'
  # pipelines (no subshells, no word-splitting).
  app_name=${image_name%%=*}
  local remainder=${image_name#*=}
  new_tag=${remainder%%=*}
}
+
#######################################
# Patch the deployment's container image to the freshly pushed tag and
# print the resulting image reference.
# Globals:   namespace, app_name, new_tag, harbor_host (read)
# Exits:     233 when new_tag is empty or the patch fails
#######################################
update_image_tag(){
  if [ "$new_tag" == "" ]; then
    echo "new tag error!"
    exit 233
  fi

  # Declare before assigning so kubectl's exit status is not masked
  # by 'local' (SC2155).
  local image_prefix
  image_prefix=$(kubectl -n "${namespace}" get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)

  echo "image grep is => ${image_prefix}"

  echo "start to update ${namespace} ${app_name} to ${new_tag} !"
  echo ""
  # A failed patch must not be reported as success afterwards.
  if ! kubectl -n "${namespace}" patch deployment "${app_name}" -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${harbor_host}/cmii/$app_name:${new_tag}\"}]}}}}"; then
    echo "kubectl patch failed !" >&2
    exit 233
  fi
  echo ""
  echo "start to wait for 3 seconds!"
  sleep 3
  local image_new
  image_new=$(kubectl -n "${namespace}" get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}')
  echo ""
  echo "new image are => $image_new"
  echo ""
}
+
#######################################
# Entry point: parse the archive name, download it, push the image to
# Harbor and roll the deployment to the new tag.
# Arguments: $1 - archive named <app>=<tag>=<date>=<build>.tar.gz
#######################################
main(){
  if [ "$1" == "" ]; then
    # Fail fast with an actionable message instead of the generic
    # "no zip file in error!" from parse_args.
    echo "usage: $0 <app>=<tag>=<date>=<build>.tar.gz" >&2
    exit 233
  fi
  parse_args "$1"
  download_from_oss "$1"
  upload_image_to_harbor "$1"
  update_image_tag
}

main "$@"
\ No newline at end of file
diff --git a/67-202508-雄安空能院/deploy/k8s-backend.yaml b/67-202508-雄安空能院/deploy/k8s-backend.yaml
new file mode 100644
index 0000000..5460481
--- /dev/null
+++ b/67-202508-雄安空能院/deploy/k8s-backend.yaml
@@ -0,0 +1,1225 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uas-lifecycle
+ namespace: xakny
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 0
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - xakny
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uas-lifecycle
+ image: 192.168.0.2:8033/cmii/cmii-uas-lifecycle:2.1-test-20250801
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xakny
+ - name: APPLICATION_NAME
+ value: cmii-uas-lifecycle
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.1
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.1
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.0.2:8033/cmii/cmii-uas-lifecycle:2.1-test-20250801
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xakny/cmii-uas-lifecycle
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uas-lifecycle
+ namespace: xakny
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uas-lifecycle-kny
+ namespace: xakny
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle-kny
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle-kny
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle-kny
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - xakny
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uas-lifecycle-kny
+ image: 192.168.0.2:8033/cmii/cmii-uas-lifecycle-kny:5.7.0-snapshot
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xakny
+ - name: APPLICATION_NAME
+ value: cmii-uas-lifecycle-kny
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.1
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.1
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.0.2:8033/cmii/cmii-uas-lifecycle-kny:5.7.0-snapshot
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle-kny
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle-kny
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle-kny
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-lifecycle-kny
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xakny/cmii-uas-lifecycle-kny
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uas-lifecycle-kny
+ namespace: xakny
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle-kny
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uas-lifecycle-kny
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uas-gateway
+ namespace: xakny
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-gateway
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 0
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uas-gateway
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-gateway
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - xakny
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uas-gateway
+ image: 192.168.0.2:8033/cmii/cmii-uas-gateway:2.1-test-prof
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xakny
+ - name: APPLICATION_NAME
+ value: cmii-uas-gateway
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.1
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.1
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.0.2:8033/cmii/cmii-uas-gateway:2.1-test-prof
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-gateway
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-gateway
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-gateway
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uas-gateway
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xakny/cmii-uas-gateway
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uas-gateway
+ namespace: xakny
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uas-gateway
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uas-gateway
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-material-warehouse
+ namespace: xakny
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-material-warehouse
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uav-material-warehouse
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-material-warehouse
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - xakny
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-material-warehouse
+ image: 192.168.0.2:8033/cmii/cmii-uav-material-warehouse:master-2.1-20250704
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xakny
+ - name: APPLICATION_NAME
+ value: cmii-uav-material-warehouse
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.1
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.1
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.0.2:8033/cmii/cmii-uav-material-warehouse:master-2.1-20250704
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-material-warehouse
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-material-warehouse
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-material-warehouse
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-material-warehouse
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xakny/cmii-uav-material-warehouse
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-material-warehouse
+ namespace: xakny
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-material-warehouse
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uav-material-warehouse
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uavms-pyfusion
+ namespace: xakny
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uavms-pyfusion
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uavms-pyfusion
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uavms-pyfusion
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - xakny
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uavms-pyfusion
+ image: 192.168.0.2:8033/cmii/cmii-uavms-pyfusion:6.3.6
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xakny
+ - name: APPLICATION_NAME
+ value: cmii-uavms-pyfusion
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.1
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.1
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.0.2:8033/cmii/cmii-uavms-pyfusion:6.3.6
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uavms-pyfusion
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uavms-pyfusion
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uavms-pyfusion
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uavms-pyfusion
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xakny/cmii-uavms-pyfusion
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uavms-pyfusion
+ namespace: xakny
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uavms-pyfusion
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uavms-pyfusion
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-iot-dispatcher
+ namespace: xakny
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-iot-dispatcher
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uav-iot-dispatcher
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-iot-dispatcher
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - xakny
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-iot-dispatcher
+ image: 192.168.0.2:8033/cmii/cmii-uav-iot-dispatcher:6.2.0-focus
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xakny
+ - name: APPLICATION_NAME
+ value: cmii-uav-iot-dispatcher
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.1
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.1
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.0.2:8033/cmii/cmii-uav-iot-dispatcher:6.2.0-focus
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-iot-dispatcher
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-iot-dispatcher
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-iot-dispatcher
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-iot-dispatcher
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xakny/cmii-uav-iot-dispatcher
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-iot-dispatcher
+ namespace: xakny
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-iot-dispatcher
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uav-iot-dispatcher
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-sense-adapter
+ namespace: xakny
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-sense-adapter
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-uav-sense-adapter
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-sense-adapter
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - xakny
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-sense-adapter
+ image: 192.168.0.2:8033/cmii/cmii-uav-sense-adapter:pro-2.1.0
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xakny
+ - name: APPLICATION_NAME
+ value: cmii-uav-sense-adapter
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+ value: uas-2.1
+ - name: SYS_CONFIG_GROUP
+ value: uas-2.1
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.0.2:8033/cmii/cmii-uav-sense-adapter:pro-2.1.0
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-sense-adapter
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-sense-adapter
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-sense-adapter
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-uav-sense-adapter
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xakny/cmii-uav-sense-adapter
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-sense-adapter
+ namespace: xakny
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-uav-sense-adapter
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-uav-sense-adapter
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
diff --git a/67-202508-雄安空能院/deploy/k8s-configmap.yaml b/67-202508-雄安空能院/deploy/k8s-configmap.yaml
new file mode 100644
index 0000000..ebd4cde
--- /dev/null
+++ b/67-202508-雄安空能院/deploy/k8s-configmap.yaml
@@ -0,0 +1,672 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervision
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "supervision",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-pangu
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-ai-brain
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "ai-brain",
+ AppClientId: "APP_rafnuCAmBESIVYMH"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-logistics
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "logistics",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-multiterminal
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "multiterminal",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mws
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "mws",
+ AppClientId: "APP_uKniXPELlRERBBwK"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qingdao
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "qingdao",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uas
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "uas",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervisionh5
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "supervisionh5",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-emergency
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "emergency",
+ AppClientId: "APP_aGsTAY1uMZrpKdfk"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-pilot2cloud
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "pilot2cloud",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-scanner
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "scanner",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-blockchain
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "blockchain",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-renyike
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "renyike",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-flight-control
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "flight-control",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-base
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "base",
+ AppClientId: "APP_9LY41OaKSqk2btY0"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-securityh5
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "securityh5",
+ AppClientId: "APP_N3ImO0Ubfu9peRHD"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-traffic
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "traffic",
+ AppClientId: "APP_Jc8i2wOQ1t73QEJS"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hljtt
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "hljtt",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uasms
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "uasms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hyper
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "hyper",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smsecret
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "smsecret",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smauth
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "smauth",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-detection
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "detection",
+ AppClientId: "APP_FDHW2VLVDWPnnOCy"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-security
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "security",
+ AppClientId: "APP_JUSEMc7afyWXxvE7"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-splice
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "splice",
+ AppClientId: "APP_zE0M3sTRXrCIJS8Y"
+ }
+---
+# NOTE(review): duplicate ConfigMap removed — an identical "tenant-prefix-uasms"
+# is already defined earlier in this file; declaring the same name twice in one
+# namespace is redundant (the later apply simply overwrites the earlier one).
+# Original content, kept for reference:
+# kind: ConfigMap
+# apiVersion: v1
+# metadata:
+#   name: tenant-prefix-uasms
+#   namespace: xakny
+# data:
+#   ingress-config.js: |-
+#     var __GlobalIngressConfig = { TenantEnvironment: "", CloudHOST: "111.63.69.71:8088",
+#       ApplicationShortName: "uasms", AppClientId: "empty" }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-eventsh5
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "eventsh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mianyangbackend
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "mianyangbackend",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dikongzhixingh5
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "dikongzhixingh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-media
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "media",
+ AppClientId: "APP_4AU8lbifESQO4FD6"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-oms
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "oms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-open
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "open",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-visualization
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "visualization",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uavmsmanager
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "uavmsmanager",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-iot
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "iot",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-threedsimulation
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "threedsimulation",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-awareness
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "awareness",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-armypeople
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "armypeople",
+ AppClientId: "APP_UIegse6Lfou9pO1U"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-cmsportal
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "cmsportal",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-jiangsuwenlv
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "jiangsuwenlv",
+ AppClientId: "empty"
+ }
+---
+# NOTE(review): duplicate ConfigMap removed — an identical "tenant-prefix-uas"
+# is already defined earlier in this file; declaring the same name twice in one
+# namespace is redundant (the later apply simply overwrites the earlier one).
+# Original content, kept for reference:
+# kind: ConfigMap
+# apiVersion: v1
+# metadata:
+#   name: tenant-prefix-uas
+#   namespace: xakny
+# data:
+#   ingress-config.js: |-
+#     var __GlobalIngressConfig = { TenantEnvironment: "", CloudHOST: "111.63.69.71:8088",
+#       ApplicationShortName: "uas", AppClientId: "empty" }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dispatchh5
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "dispatchh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-classification
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "classification",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-secenter
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "secenter",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-seniclive
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "seniclive",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-share
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "share",
+ AppClientId: "APP_4lVSVI0ZGxTssir8"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qinghaitourism
+ namespace: xakny
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "",
+ CloudHOST: "111.63.69.71:8088",
+ ApplicationShortName: "qinghaitourism",
+ AppClientId: "empty"
+ }
diff --git a/67-202508-雄安空能院/deploy/k8s-dashboard.yaml b/67-202508-雄安空能院/deploy/k8s-dashboard.yaml
new file mode 100644
index 0000000..4676380
--- /dev/null
+++ b/67-202508-雄安空能院/deploy/k8s-dashboard.yaml
@@ -0,0 +1,309 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: kube-system
+
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kube-system
+
+---
+
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ kubernetes.io/cluster-service: "true"
+ name: kubernetes-dashboard
+ namespace: kube-system
+spec:
+ ports:
+ - port: 443
+ targetPort: 8443
+    nodePort: 39999 # NOTE(review): above the default NodePort range (30000-32767); requires the apiserver --service-node-port-range to be extended — confirm
+ selector:
+ k8s-app: kubernetes-dashboard
+ type: NodePort
+
+---
+
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-certs
+ namespace: kube-system
+type: Opaque
+
+---
+
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-csrf
+ namespace: kube-system
+type: Opaque
+data:
+ csrf: ""
+
+---
+
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-key-holder
+ namespace: kube-system
+type: Opaque
+
+---
+
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-settings
+ namespace: kube-system
+
+---
+
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kube-system
+rules:
+ # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
+ - apiGroups: [""]
+ resources: ["secrets"]
+ resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
+ verbs: ["get", "update", "delete"]
+ # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ resourceNames: ["kubernetes-dashboard-settings"]
+ verbs: ["get", "update"]
+ # Allow Dashboard to get metrics.
+ - apiGroups: [""]
+ resources: ["services"]
+ resourceNames: ["heapster", "dashboard-metrics-scraper"]
+ verbs: ["proxy"]
+ - apiGroups: [""]
+ resources: ["services/proxy"]
+ resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
+ verbs: ["get"]
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+rules:
+ # Allow Metrics Scraper to get metrics from the Metrics server
+ - apiGroups: ["metrics.k8s.io"]
+ resources: ["pods", "nodes"]
+ verbs: ["get", "list", "watch"]
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kube-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: kubernetes-dashboard
+subjects:
+ - kind: ServiceAccount
+ name: kubernetes-dashboard
+ namespace: kube-system
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: kubernetes-dashboard
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kubernetes-dashboard
+subjects:
+ - kind: ServiceAccount
+ name: kubernetes-dashboard
+ namespace: kube-system
+
+---
+
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kube-system
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: kubernetes-dashboard
+ template:
+ metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: kubernetes-dashboard
+ image: 192.168.0.2:8033/cmii/dashboard:v2.0.1
+ ports:
+ - containerPort: 8443
+ protocol: TCP
+ args:
+ - --auto-generate-certificates
+ - --namespace=kube-system
+ # Uncomment the following line to manually specify Kubernetes API server Host
+ # If not specified, Dashboard will attempt to auto discover the API server and connect
+ # to it. Uncomment only if the default does not work.
+ # - --apiserver-host=http://my-address:port
+ volumeMounts:
+ - name: kubernetes-dashboard-certs
+ mountPath: /certs
+ # Create on-disk volume to store exec logs
+ - mountPath: /tmp
+ name: tmp-volume
+ livenessProbe:
+ httpGet:
+ scheme: HTTPS
+ path: /
+ port: 8443
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ volumes:
+ - name: kubernetes-dashboard-certs
+ secret:
+ secretName: kubernetes-dashboard-certs
+ - name: tmp-volume
+ emptyDir: {}
+ serviceAccountName: kubernetes-dashboard
+ # Comment the following tolerations if Dashboard must not be deployed on master
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+
+---
+
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kube-system
+spec:
+ ports:
+ - port: 8000
+ targetPort: 8000
+ selector:
+ k8s-app: dashboard-metrics-scraper
+
+---
+
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kube-system
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: dashboard-metrics-scraper
+ template:
+ metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ annotations:
+ seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
+ spec:
+ containers:
+ - name: dashboard-metrics-scraper
+ image: 192.168.0.2:8033/cmii/metrics-scraper:v1.0.4
+ ports:
+ - containerPort: 8000
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /
+ port: 8000
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp-volume
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ serviceAccountName: kubernetes-dashboard
+ # Comment the following tolerations if Dashboard must not be deployed on master
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ volumes:
+ - name: tmp-volume
+ emptyDir: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: admin-user
+ namespace: kube-system
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: admin-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ name: admin-user
+ namespace: kube-system
diff --git a/67-202508-雄安空能院/deploy/k8s-emqx.yaml b/67-202508-雄安空能院/deploy/k8s-emqx.yaml
new file mode 100644
index 0000000..4c81d20
--- /dev/null
+++ b/67-202508-雄安空能院/deploy/k8s-emqx.yaml
@@ -0,0 +1,276 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-emqxs
+ namespace: xakny
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-emqxs-env
+ namespace: xakny
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.1
+data:
+ EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
+ EMQX_NAME: "helm-emqxs"
+ EMQX_CLUSTER__DISCOVERY: "k8s"
+ EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
+ EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
+ EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
+  EMQX_CLUSTER__K8S__NAMESPACE: "xakny"
+ EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
+ EMQX_ALLOW_ANONYMOUS: "false"
+ EMQX_ACL_NOMATCH: "deny"
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-emqxs-cm
+ namespace: xakny
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.1
+data:
+ emqx_auth_mnesia.conf: |-
+ auth.mnesia.password_hash = sha256
+
+ # clientid 认证数据
+ auth.client.1.clientid = admin
+ auth.client.1.password = odD8#Ve7.B
+ auth.client.2.clientid = cmlc
+ auth.client.2.password = odD8#Ve7.B
+
+ ## username 认证数据
+ auth.user.1.username = admin
+ auth.user.1.password = odD8#Ve7.B
+ auth.user.2.username = cmlc
+ auth.user.2.password = odD8#Ve7.B
+
+ acl.conf: |-
+ {allow, {user, "admin"}, pubsub, ["admin/#"]}.
+ {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
+ {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
+ {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
+ {allow, all}.
+
+ loaded_plugins: |-
+ {emqx_auth_mnesia,true}.
+    %% {emqx_auth_mnesia,true}. -- duplicate of the previous line; commented out
+ {emqx_management, true}.
+ {emqx_recon, true}.
+ {emqx_retainer, false}.
+ {emqx_dashboard, true}.
+ {emqx_telemetry, true}.
+ {emqx_rule_engine, true}.
+ {emqx_bridge_mqtt, false}.
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-emqxs
+ namespace: xakny
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.1
+spec:
+ replicas: 1
+ serviceName: helm-emqxs-headless
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ template:
+ metadata:
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.1
+ spec:
+ affinity: {}
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-emqxs
+ containers:
+ - name: helm-emqxs
+          image: 192.168.0.2:8033/cmii/emqx:5.5.1 # NOTE(review): env vars and loaded_plugins below use EMQX 4.x config format; verify they are honored by EMQX 5.x
+ imagePullPolicy: Always
+ ports:
+ - name: mqtt
+ containerPort: 1883
+ - name: mqttssl
+ containerPort: 8883
+ - name: mgmt
+ containerPort: 8081
+ - name: ws
+ containerPort: 8083
+ - name: wss
+ containerPort: 8084
+ - name: dashboard
+ containerPort: 18083
+ - name: ekka
+ containerPort: 4370
+ envFrom:
+ - configMapRef:
+ name: helm-emqxs-env
+ resources: {}
+ volumeMounts:
+ - name: emqx-data
+ mountPath: "/opt/emqx/data/mnesia"
+ readOnly: false
+ - name: helm-emqxs-cm
+ mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf"
+ subPath: emqx_auth_mnesia.conf
+ readOnly: false
+# - name: helm-emqxs-cm
+# mountPath: "/opt/emqx/etc/acl.conf"
+# subPath: "acl.conf"
+# readOnly: false
+ - name: helm-emqxs-cm
+ mountPath: "/opt/emqx/data/loaded_plugins"
+ subPath: loaded_plugins
+ readOnly: false
+ volumes:
+ - name: emqx-data
+ persistentVolumeClaim:
+ claimName: helm-emqxs
+ - name: helm-emqxs-cm
+ configMap:
+ name: helm-emqxs-cm
+ items:
+ - key: emqx_auth_mnesia.conf
+ path: emqx_auth_mnesia.conf
+ - key: acl.conf
+ path: acl.conf
+ - key: loaded_plugins
+ path: loaded_plugins
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-emqxs
+ namespace: xakny
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - watch
+ - list
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-emqxs
+ namespace: xakny
+subjects:
+ - kind: ServiceAccount
+ name: helm-emqxs
+ namespace: xakny
+roleRef:
+ kind: Role
+ name: helm-emqxs
+ apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs
+ namespace: xakny
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.1
+spec:
+ type: NodePort
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - port: 1883
+ name: mqtt
+ targetPort: 1883
+ nodePort: 31883
+ - port: 18083
+ name: dashboard
+ targetPort: 18083
+      nodePort: 38085 # NOTE(review): above default NodePort range (30000-32767) — confirm cluster range is extended
+ - port: 8083
+ name: mqtt-websocket
+ targetPort: 8083
+      nodePort: 38083 # NOTE(review): above default NodePort range (30000-32767) — confirm cluster range is extended
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs-headless
+ namespace: xakny
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.1
+spec:
+ type: ClusterIP
+ clusterIP: None
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - name: mqtt
+ port: 1883
+ protocol: TCP
+ targetPort: 1883
+ - name: mqttssl
+ port: 8883
+ protocol: TCP
+ targetPort: 8883
+ - name: mgmt
+ port: 8081
+ protocol: TCP
+ targetPort: 8081
+ - name: websocket
+ port: 8083
+ protocol: TCP
+ targetPort: 8083
+ - name: wss
+ port: 8084
+ protocol: TCP
+ targetPort: 8084
+ - name: dashboard
+ port: 18083
+ protocol: TCP
+ targetPort: 18083
+ - name: ekka
+ port: 4370
+ protocol: TCP
+ targetPort: 4370
diff --git a/67-202508-雄安空能院/deploy/k8s-frontend.yaml b/67-202508-雄安空能院/deploy/k8s-frontend.yaml
new file mode 100644
index 0000000..2e4a792
--- /dev/null
+++ b/67-202508-雄安空能院/deploy/k8s-frontend.yaml
@@ -0,0 +1,203 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: nginx-cm
+ namespace: xakny
+ labels:
+ cmii.type: frontend
+data:
+ nginx.conf: |
+ server {
+ listen 9528;
+ server_name localhost;
+ gzip on;
+
+ location / {
+ root /home/cmii-platform/dist;
+ index index.html index.htm;
+ }
+
+ error_page 500 502 503 504 /50x.html;
+ location = /50x.html {
+ root html;
+ }
+ }
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-platform-uaskny
+ namespace: xakny
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uaskny
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uaskny
+ template:
+ metadata:
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uaskny
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-platform-uaskny
+ image: 192.168.0.2:8033/cmii/cmii-uav-platform-uaskny:5.7.0-snapshot
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xakny
+ - name: APPLICATION_NAME
+ value: cmii-uav-platform-uaskny
+ ports:
+ - name: platform-9528
+ containerPort: 9528
+ protocol: TCP
+ resources:
+ limits:
+ cpu: "1"
+ memory: 1Gi
+ requests:
+ cpu: 50m
+ memory: 50Mi
+ volumeMounts:
+ - name: nginx-conf
+ mountPath: /etc/nginx/conf.d/nginx.conf
+ subPath: nginx.conf
+ - name: tenant-prefix
+ subPath: ingress-config.js
+ mountPath: /home/cmii-platform/dist/ingress-config.js
+ volumes:
+ - name: nginx-conf
+ configMap:
+ name: nginx-cm
+ items:
+ - key: nginx.conf
+ path: nginx.conf
+ - name: tenant-prefix
+ configMap:
+ name: tenant-prefix-uas
+ items:
+ - key: ingress-config.js
+ path: ingress-config.js
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-platform-uaskny
+ namespace: xakny
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uaskny
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uaskny
+ ports:
+ - name: web-svc-port
+ port: 9528
+ protocol: TCP
+ targetPort: 9528
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-platform-uasmskny
+ namespace: xakny
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uasmskny
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/app-version: uas-2.1
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uasmskny
+ template:
+ metadata:
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uasmskny
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-platform-uasmskny
+ image: 192.168.0.2:8033/cmii/cmii-uav-platform-uasmskny:develop-0807
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xakny
+ - name: APPLICATION_NAME
+ value: cmii-uav-platform-uasmskny
+ ports:
+ - name: platform-9528
+ containerPort: 9528
+ protocol: TCP
+ resources:
+ limits:
+ cpu: "1"
+ memory: 1Gi
+ requests:
+ cpu: 50m
+ memory: 50Mi
+ volumeMounts:
+ - name: nginx-conf
+ mountPath: /etc/nginx/conf.d/nginx.conf
+ subPath: nginx.conf
+ - name: tenant-prefix
+ subPath: ingress-config.js
+ mountPath: /home/cmii-platform/dist/ingress-config.js
+ volumes:
+ - name: nginx-conf
+ configMap:
+ name: nginx-cm
+ items:
+ - key: nginx.conf
+ path: nginx.conf
+ - name: tenant-prefix
+ configMap:
+ name: tenant-prefix-uasms
+ items:
+ - key: ingress-config.js
+ path: ingress-config.js
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-platform-uasmskny
+ namespace: xakny
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uasmskny
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/version: uas-2.1
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-uasmskny
+ ports:
+ - name: web-svc-port
+ port: 9528
+ protocol: TCP
+ targetPort: 9528
diff --git a/67-202508-雄安空能院/deploy/k8s-ingress.yaml b/67-202508-雄安空能院/deploy/k8s-ingress.yaml
new file mode 100644
index 0000000..d3e1c62
--- /dev/null
+++ b/67-202508-雄安空能院/deploy/k8s-ingress.yaml
@@ -0,0 +1,826 @@
+---
+apiVersion: networking.k8s.io/v1beta1  # NOTE(review): removed in Kubernetes v1.22+; migrate to networking.k8s.io/v1 (backend.service.name/port) if the target cluster is newer — confirm cluster version
+kind: Ingress
+metadata:
+ name: frontend-applications-ingress
+ namespace: xakny
+ labels:
+ type: frontend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.1
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ rewrite ^(/supervision)$ $1/ redirect;
+ rewrite ^(/supervisionh5)$ $1/ redirect;
+ rewrite ^(/pangu)$ $1/ redirect;
+ rewrite ^(/ai-brain)$ $1/ redirect;
+ rewrite ^(/armypeople)$ $1/ redirect;
+ rewrite ^(/awareness)$ $1/ redirect;
+ rewrite ^(/base)$ $1/ redirect;
+ rewrite ^(/blockchain)$ $1/ redirect;
+ rewrite ^(/classification)$ $1/ redirect;
+ rewrite ^(/cmsportal)$ $1/ redirect;
+ rewrite ^(/detection)$ $1/ redirect;
+ rewrite ^(/dikongzhixingh5)$ $1/ redirect;
+ rewrite ^(/dispatchh5)$ $1/ redirect;
+ rewrite ^(/emergency)$ $1/ redirect;
+ rewrite ^(/eventsh5)$ $1/ redirect;
+ rewrite ^(/flight-control)$ $1/ redirect;
+ rewrite ^(/hljtt)$ $1/ redirect;
+ rewrite ^(/hyper)$ $1/ redirect;
+ rewrite ^(/iot)$ $1/ redirect;
+ rewrite ^(/jiangsuwenlv)$ $1/ redirect;
+ rewrite ^(/logistics)$ $1/ redirect;
+ rewrite ^(/media)$ $1/ redirect;
+ rewrite ^(/mianyangbackend)$ $1/ redirect;
+ rewrite ^(/multiterminal)$ $1/ redirect;
+ rewrite ^(/mws)$ $1/ redirect;
+ rewrite ^(/oms)$ $1/ redirect;
+ rewrite ^(/open)$ $1/ redirect;
+ rewrite ^(/pilot2cloud)$ $1/ redirect;
+ rewrite ^(/qingdao)$ $1/ redirect;
+ rewrite ^(/qinghaitourism)$ $1/ redirect;
+ rewrite ^(/renyike)$ $1/ redirect;
+ rewrite ^(/scanner)$ $1/ redirect;
+ rewrite ^(/security)$ $1/ redirect;
+ rewrite ^(/securityh5)$ $1/ redirect;
+ rewrite ^(/seniclive)$ $1/ redirect;
+ rewrite ^(/share)$ $1/ redirect;
+ rewrite ^(/smauth)$ $1/ redirect;
+ rewrite ^(/smsecret)$ $1/ redirect;
+ rewrite ^(/splice)$ $1/ redirect;
+ rewrite ^(/threedsimulation)$ $1/ redirect;
+ rewrite ^(/traffic)$ $1/ redirect;
+      rewrite ^(/uas)$ $1/ redirect;
+      rewrite ^(/uasms)$ $1/ redirect;
+ rewrite ^(/visualization)$ $1/ redirect;
+ rewrite ^(/uavmsmanager)$ $1/ redirect;
+ rewrite ^(/secenter)$ $1/ redirect;
+spec:
+ rules:
+ - host: fake-domain.xakny.io
+ http:
+ paths:
+ - path: /?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform
+ servicePort: 9528
+ - path: /supervision/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-platform-supervision
+ servicePort: 9528
+ - path: /supervisionh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-platform-supervisionh5
+ servicePort: 9528
+ - path: /pangu/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform
+ servicePort: 9528
+ - path: /ai-brain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-ai-brain
+ servicePort: 9528
+ - path: /armypeople/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-armypeople
+ servicePort: 9528
+ - path: /awareness/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-awareness
+ servicePort: 9528
+ - path: /base/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-base
+ servicePort: 9528
+ - path: /blockchain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-blockchain
+ servicePort: 9528
+ - path: /classification/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-classification
+ servicePort: 9528
+ - path: /cmsportal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-cms-portal
+ servicePort: 9528
+ - path: /detection/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-detection
+ servicePort: 9528
+ - path: /dikongzhixingh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-dikongzhixingh5
+ servicePort: 9528
+ - path: /dispatchh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-dispatchh5
+ servicePort: 9528
+ - path: /emergency/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-emergency-rescue
+ servicePort: 9528
+ - path: /eventsh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-eventsh5
+ servicePort: 9528
+ - path: /flight-control/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-flight-control
+ servicePort: 9528
+ - path: /hljtt/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-hljtt
+ servicePort: 9528
+ - path: /hyper/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-hyperspectral
+ servicePort: 9528
+ - path: /iot/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-iot-manager
+ servicePort: 9528
+ - path: /jiangsuwenlv/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-jiangsuwenlv
+ servicePort: 9528
+ - path: /logistics/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-logistics
+ servicePort: 9528
+ - path: /media/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-media
+ servicePort: 9528
+ - path: /mianyangbackend/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-mianyangbackend
+ servicePort: 9528
+ - path: /multiterminal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-multiterminal
+ servicePort: 9528
+ - path: /mws/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-mws
+ servicePort: 9528
+ - path: /oms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-oms
+ servicePort: 9528
+ - path: /open/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-open
+ servicePort: 9528
+ - path: /pilot2cloud/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-pilot2-to-cloud
+ servicePort: 9528
+ - path: /qingdao/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-qingdao
+ servicePort: 9528
+ - path: /qinghaitourism/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-qinghaitourism
+ servicePort: 9528
+ - path: /renyike/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-renyike
+ servicePort: 9528
+ - path: /scanner/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-scanner
+ servicePort: 9528
+ - path: /security/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-security
+ servicePort: 9528
+ - path: /securityh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-securityh5
+ servicePort: 9528
+ - path: /seniclive/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-seniclive
+ servicePort: 9528
+ - path: /share/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-share
+ servicePort: 9528
+ - path: /smauth/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-smauth
+ servicePort: 9528
+ - path: /smsecret/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-smsecret
+ servicePort: 9528
+ - path: /splice/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-splice
+ servicePort: 9528
+ - path: /threedsimulation/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-threedsimulation
+ servicePort: 9528
+ - path: /traffic/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-traffic
+ servicePort: 9528
+      - path: /uas/?(.*)
+        pathType: ImplementationSpecific
+        backend:
+          serviceName: cmii-uav-platform-uas
+          servicePort: 9528
+      # NOTE(review): duplicate path /uas/?(.*) — ingress-nginx only honors the first
+      # matching path, so the cmii-uav-platform-uaskny backend below is unreachable.
+      # Confirm which service should own /uas, or give the kny variant its own prefix.
+      - path: /uas/?(.*)
+        pathType: ImplementationSpecific
+        backend:
+          serviceName: cmii-uav-platform-uaskny
+          servicePort: 9528
+      - path: /uasms/?(.*)
+        pathType: ImplementationSpecific
+        backend:
+          serviceName: cmii-uav-platform-uasms
+          servicePort: 9528
+      # NOTE(review): duplicate path /uasms/?(.*) — same issue as /uas above; the
+      # cmii-uav-platform-uasmskny backend is shadowed by the preceding entry.
+      - path: /uasms/?(.*)
+        pathType: ImplementationSpecific
+        backend:
+          serviceName: cmii-uav-platform-uasmskny
+          servicePort: 9528
+ - path: /visualization/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-visualization
+ servicePort: 9528
+ - path: /uavmsmanager/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-platform-manager
+ servicePort: 9528
+ - path: /secenter/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-platform-security-center
+ servicePort: 9528
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: backend-applications-ingress
+ namespace: xakny
+ labels:
+ type: backend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.1
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+spec:
+ rules:
+ - host: cmii-admin-data.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-data
+ servicePort: 8080
+ - host: cmii-admin-gateway.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-gateway
+ servicePort: 8080
+ - host: cmii-admin-user.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-user
+ servicePort: 8080
+ - host: cmii-app-release.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-app-release
+ servicePort: 8080
+ - host: cmii-open-gateway.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-open-gateway
+ servicePort: 8080
+ - host: cmii-sky-converge.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-sky-converge
+ servicePort: 8080
+ - host: cmii-suav-supervision.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-supervision
+ servicePort: 8080
+ - host: cmii-uas-datahub.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-datahub
+ servicePort: 8080
+ - host: cmii-uas-gateway.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-gateway
+ servicePort: 8080
+ - host: cmii-uas-lifecycle.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-lifecycle
+ servicePort: 8080
+ - host: cmii-uav-advanced5g.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-advanced5g
+ servicePort: 8080
+ - host: cmii-uav-airspace.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-airspace
+ servicePort: 8080
+ - host: cmii-uav-alarm.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-alarm
+ servicePort: 8080
+ - host: cmii-uav-autowaypoint.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-autowaypoint
+ servicePort: 8080
+ - host: cmii-uav-brain.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-brain
+ servicePort: 8080
+ - host: cmii-uav-bridge.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-bridge
+ servicePort: 8080
+ - host: cmii-uav-cloud-live.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-cloud-live
+ servicePort: 8080
+ - host: cmii-uav-clusters.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-clusters
+ servicePort: 8080
+ - host: cmii-uav-cms.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-cms
+ servicePort: 8080
+ - host: cmii-uav-data-post-process.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-data-post-process
+ servicePort: 8080
+ - host: cmii-uav-depotautoreturn.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-depotautoreturn
+ servicePort: 8080
+ - host: cmii-uav-developer.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-developer
+ servicePort: 8080
+ - host: cmii-uav-device.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-device
+ servicePort: 8080
+ - host: cmii-uav-emergency.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-emergency
+ servicePort: 8080
+ - host: cmii-uav-fwdd.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-fwdd
+ servicePort: 8080
+ - host: cmii-uav-gateway.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gateway
+ servicePort: 8080
+ - host: cmii-uav-gis-server.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gis-server
+ servicePort: 8080
+ - host: cmii-uav-grid-datasource.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-datasource
+ servicePort: 8080
+ - host: cmii-uav-grid-engine.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-engine
+ servicePort: 8080
+ - host: cmii-uav-grid-manage.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-manage
+ servicePort: 8080
+ - host: cmii-uav-industrial-portfolio.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-industrial-portfolio
+ servicePort: 8080
+ - host: cmii-uav-integration.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-integration
+ servicePort: 8080
+ - host: cmii-uav-iot-dispatcher.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-iot-dispatcher
+ servicePort: 8080
+ - host: cmii-uav-iot-manager.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-iot-manager
+ servicePort: 8080
+ - host: cmii-uav-kpi-monitor.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-kpi-monitor
+ servicePort: 8080
+ - host: cmii-uav-logger.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-logger
+ servicePort: 8080
+ - host: cmii-uav-material-warehouse.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-material-warehouse
+ servicePort: 8080
+ - host: cmii-uav-mission.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-mission
+ servicePort: 8080
+ - host: cmii-uav-mqtthandler.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-mqtthandler
+ servicePort: 8080
+ - host: cmii-uav-multilink.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-multilink
+ servicePort: 8080
+ - host: cmii-uav-notice.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-notice
+ servicePort: 8080
+ - host: cmii-uav-oauth.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-oauth
+ servicePort: 8080
+ - host: cmii-uav-process.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-process
+ servicePort: 8080
+ - host: cmii-uav-sec-awareness.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sec-awareness
+ servicePort: 8080
+ - host: cmii-uav-security-trace.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-security-trace
+ servicePort: 8080
+ - host: cmii-uav-sense-adapter.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sense-adapter
+ servicePort: 8080
+ - host: cmii-uav-surveillance.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-surveillance
+ servicePort: 8080
+ - host: cmii-uav-sync.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sync
+ servicePort: 8080
+ - host: cmii-uav-tcp-server.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-tcp-server
+ servicePort: 8080
+ - host: cmii-uav-threedsimulation.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-threedsimulation
+ servicePort: 8080
+ - host: cmii-uav-tower.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-tower
+ servicePort: 8080
+ - host: cmii-uav-user.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-user
+ servicePort: 8080
+ - host: cmii-uav-watchdog.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-watchdog
+ servicePort: 8080
+ - host: cmii-uav-waypoint.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-waypoint
+ servicePort: 8080
+ - host: cmii-uavms-pyfusion.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-pyfusion
+ servicePort: 8080
+ - host: cmii-uavms-security-center.uavcloud-xakny.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-security-center
+ servicePort: 8080
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: all-gateways-ingress
+ namespace: xakny
+ labels:
+ type: api-gateway
+ octopus.control: all-ingress-config-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.1
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ proxy_set_header upgradePrefix $http_upgrade;
+ proxy_set_header Connection "upgradePrefix";
+spec:
+ rules:
+ - host: fake-domain.xakny.io
+ http:
+ paths:
+ - path: /oms/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-gateway
+ servicePort: 8080
+ - path: /open/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-open-gateway
+ servicePort: 8080
+ - path: /api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gateway
+ servicePort: 8080
+ - path: /uas/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-gateway
+ servicePort: 8080
+ - path: /converge/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-sky-converge
+ servicePort: 8080
diff --git a/67-202508-雄安空能院/deploy/k8s-mongo.yaml b/67-202508-雄安空能院/deploy/k8s-mongo.yaml
new file mode 100644
index 0000000..b41e408
--- /dev/null
+++ b/67-202508-雄安空能院/deploy/k8s-mongo.yaml
@@ -0,0 +1,78 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mongo
+ namespace: xakny
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.1
+spec:
+ type: NodePort
+ selector:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ ports:
+ - port: 27017
+ name: server-27017
+ targetPort: 27017
+ nodePort: 37017
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-mongo
+ namespace: xakny
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.1
+spec:
+ serviceName: helm-mongo
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ template:
+ metadata:
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: uas-2.1
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ containers:
+ - name: helm-mongo
+ image: 192.168.0.2:8033/cmii/mongo:5.0
+ resources: {}
+ ports:
+ - containerPort: 27017
+ name: mongo27017
+ protocol: TCP
+ env:
+ - name: MONGO_INITDB_ROOT_USERNAME
+ value: cmlc
+ - name: MONGO_INITDB_ROOT_PASSWORD
+ value: REdPza8#oVlt
+ volumeMounts:
+ - name: mongo-data
+ mountPath: /data/db
+ readOnly: false
+ subPath: default/helm-mongo/data/db
+ volumes:
+ - name: mongo-data
+ persistentVolumeClaim:
+ claimName: helm-mongo
+---
diff --git a/67-202508-雄安空能院/deploy/k8s-mysql.yaml b/67-202508-雄安空能院/deploy/k8s-mysql.yaml
new file mode 100644
index 0000000..8e096a9
--- /dev/null
+++ b/67-202508-雄安空能院/deploy/k8s-mysql.yaml
@@ -0,0 +1,410 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-mysql
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+ annotations: {}
+secrets:
+ - name: helm-mysql
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-mysql
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+type: Opaque
+data:
+ mysql-root-password: "UXpmWFFoZDNiUQ=="
+ mysql-password: "S0F0cm5PckFKNw=="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-mysql
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: primary
+data:
+ my.cnf: |-
+
+ [mysqld]
+ port=3306
+ basedir=/opt/bitnami/mysql
+ datadir=/bitnami/mysql/data
+ pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ log-error=/bitnami/mysql/data/error.log
+ general_log_file = /bitnami/mysql/data/general.log
+ slow_query_log_file = /bitnami/mysql/data/slow.log
+ innodb_data_file_path = ibdata1:512M:autoextend
+ innodb_buffer_pool_size = 512M
+ innodb_buffer_pool_instances = 2
+ innodb_log_file_size = 512M
+    innodb_log_files_in_group = 4
+ log-bin = /bitnami/mysql/data/mysql-bin
+ max_binlog_size=1G
+ transaction_isolation = REPEATABLE-READ
+ default_storage_engine = innodb
+ character-set-server = utf8mb4
+ collation-server=utf8mb4_bin
+ binlog_format = ROW
+ binlog_rows_query_log_events=on
+ binlog_cache_size=4M
+ binlog_expire_logs_seconds = 1296000
+ max_binlog_cache_size=2G
+ gtid_mode = on
+ enforce_gtid_consistency = 1
+ sync_binlog = 1
+ innodb_flush_log_at_trx_commit = 1
+ innodb_flush_method = O_DIRECT
+ log_slave_updates=1
+ relay_log_recovery = 1
+ relay-log-purge = 1
+ default_time_zone = '+08:00'
+ lower_case_table_names=1
+ log_bin_trust_function_creators=1
+ group_concat_max_len=67108864
+ innodb_io_capacity = 4000
+ innodb_io_capacity_max = 8000
+ innodb_flush_sync = 0
+ innodb_flush_neighbors = 0
+ innodb_write_io_threads = 8
+ innodb_read_io_threads = 8
+ innodb_purge_threads = 4
+ innodb_page_cleaners = 4
+ innodb_open_files = 65535
+ innodb_max_dirty_pages_pct = 50
+ innodb_lru_scan_depth = 4000
+ innodb_checksum_algorithm = crc32
+ innodb_lock_wait_timeout = 10
+ innodb_rollback_on_timeout = 1
+ innodb_print_all_deadlocks = 1
+ innodb_file_per_table = 1
+ innodb_online_alter_log_max_size = 4G
+ innodb_stats_on_metadata = 0
+ innodb_thread_concurrency = 0
+ innodb_sync_spin_loops = 100
+ innodb_spin_wait_delay = 30
+ lock_wait_timeout = 3600
+ slow_query_log = 1
+ long_query_time = 10
+ log_queries_not_using_indexes =1
+ log_throttle_queries_not_using_indexes = 60
+ min_examined_row_limit = 100
+ log_slow_admin_statements = 1
+ log_slow_slave_statements = 1
+ default_authentication_plugin=mysql_native_password
+ skip-name-resolve=1
+ explicit_defaults_for_timestamp=1
+ plugin_dir=/opt/bitnami/mysql/plugin
+ max_allowed_packet=128M
+ max_connections = 2000
+ max_connect_errors = 1000000
+ table_definition_cache=2000
+ table_open_cache_instances=64
+ tablespace_definition_cache=1024
+ thread_cache_size=256
+ interactive_timeout = 600
+ wait_timeout = 600
+ tmpdir=/opt/bitnami/mysql/tmp
+    max_allowed_packet=32M  # NOTE(review): duplicate — the last value wins in MySQL option files, so this silently overrides max_allowed_packet=128M set above; confirm which is intended and remove the other
+ bind-address=0.0.0.0
+ performance_schema = 1
+ performance_schema_instrument = '%memory%=on'
+ performance_schema_instrument = '%lock%=on'
+ innodb_monitor_enable=ALL
+
+ [mysql]
+ no-auto-rehash
+
+ [mysqldump]
+ quick
+ max_allowed_packet = 32M
+
+ [client]
+ port=3306
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ default-character-set=UTF8
+ plugin_dir=/opt/bitnami/mysql/plugin
+
+ [manager]
+ port=3306
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-mysql-init-scripts
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: primary
+data:
+ create_users_grants_core.sql: |-
+ create user zyly@'%' identified by 'Cmii@451315';
+ grant select on *.* to zyly@'%';
+ create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
+ grant all on *.* to zyly_qc@'%';
+ create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
+ grant all on *.* to k8s_admin@'%';
+ create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
+ grant all on *.* to audit_dba@'%';
+ create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
+ GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
+ create user monitor@'%' identified by 'PL3#nGtrWbf-';
+ grant REPLICATION CLIENT on *.* to monitor@'%';
+ flush privileges;
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: cmii-mysql
+ namespace: xakny
+ labels:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: xakny
+ cmii.app: mysql
+ cmii.type: middleware
+ octopus.control: mysql-db-wdd
+spec:
+ ports:
+ - name: mysql
+ protocol: TCP
+ port: 13306
+ targetPort: mysql
+ selector:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: xakny
+ cmii.app: mysql
+ cmii.type: middleware
+ type: ClusterIP
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mysql-headless
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ annotations: {}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: true
+ ports:
+ - name: mysql
+ port: 3306
+ targetPort: mysql
+ selector:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: xakny
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mysql
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ annotations: {}
+spec:
+ type: NodePort
+ ports:
+ - name: mysql
+ port: 3306
+ protocol: TCP
+ targetPort: mysql
+ nodePort: 33306
+ selector:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: xakny
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-mysql
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: xakny
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ serviceName: helm-mysql
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ annotations:
+ checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-mysql
+ affinity: {}
+ nodeSelector:
+ mysql-deploy: "true"
+ securityContext:
+ fsGroup: 1001
+ initContainers:
+ - name: change-volume-permissions
+ image: 192.168.0.2:8033/cmii/bitnami-shell:11-debian-11-r136
+ imagePullPolicy: "Always"
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ chown -R 1001:1001 /bitnami/mysql
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - name: mysql-data
+ mountPath: /bitnami/mysql
+ containers:
+ - name: mysql
+ image: 192.168.0.2:8033/cmii/mysql:8.1.0-debian-11-r42
+ imagePullPolicy: "IfNotPresent"
+ securityContext:
+ runAsUser: 1001
+ env:
+ - name: BITNAMI_DEBUG
+ value: "true"
+ - name: MYSQL_ROOT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-mysql
+ key: mysql-root-password
+ - name: MYSQL_DATABASE
+ value: "cmii"
+ ports:
+ - name: mysql
+ containerPort: 3306
+ livenessProbe:
+ failureThreshold: 5
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ readinessProbe:
+ failureThreshold: 5
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ startupProbe:
+ failureThreshold: 60
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: mysql-data
+ mountPath: /bitnami/mysql
+ - name: custom-init-scripts
+ mountPath: /docker-entrypoint-initdb.d
+ - name: config
+ mountPath: /opt/bitnami/mysql/conf/my.cnf
+ subPath: my.cnf
+ volumes:
+ - name: config
+ configMap:
+ name: helm-mysql
+ - name: custom-init-scripts
+ configMap:
+ name: helm-mysql-init-scripts
+ - name: mysql-data
+ hostPath:
+ path: /var/lib/docker/mysql-pv/xakny/
diff --git a/67-202508-雄安空能院/deploy/k8s-nacos.yaml b/67-202508-雄安空能院/deploy/k8s-nacos.yaml
new file mode 100644
index 0000000..695626c
--- /dev/null
+++ b/67-202508-雄安空能院/deploy/k8s-nacos.yaml
@@ -0,0 +1,130 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-nacos-cm
+ namespace: xakny
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: uas-2.1
+data:
+ mysql.db.name: "cmii_nacos_config"
+ mysql.db.host: "helm-mysql"
+ mysql.port: "3306"
+ mysql.user: "k8s_admin"
+ mysql.password: "fP#UaH6qQ3)8"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-nacos
+ namespace: xakny
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: uas-2.1
+spec:
+ type: NodePort
+ selector:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ ports:
+ - port: 8848
+ name: server
+ targetPort: 8848
+      nodePort: 38848  # NOTE(review): outside the default NodePort range (30000-32767); requires an extended kube-apiserver --service-node-port-range — confirm cluster config
+ - port: 9848
+ name: server12
+ targetPort: 9848
+ - port: 9849
+ name: server23
+ targetPort: 9849
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-nacos
+ namespace: xakny
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: uas-2.1
+spec:
+ serviceName: helm-nacos
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ template:
+ metadata:
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/version: uas-2.1
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ containers:
+ - name: nacos-server
+ image: 192.168.0.2:8033/cmii/nacos-server:v2.1.2
+ ports:
+ - containerPort: 8848
+ name: dashboard
+ - containerPort: 9848
+ name: tcp-9848
+ - containerPort: 9849
+ name: tcp-9849
+ env:
+ - name: NACOS_AUTH_ENABLE
+ value: "false"
+ - name: NACOS_REPLICAS
+ value: "1"
+ - name: MYSQL_SERVICE_DB_NAME
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.db.name
+ - name: MYSQL_SERVICE_PORT
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.port
+ - name: MYSQL_SERVICE_USER
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.user
+ - name: MYSQL_SERVICE_PASSWORD
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.password
+ - name: MYSQL_SERVICE_HOST
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.db.host
+ - name: NACOS_SERVER_PORT
+ value: "8848"
+ - name: NACOS_APPLICATION_PORT
+ value: "8848"
+ - name: PREFER_HOST_MODE
+ value: "hostname"
+ - name: MODE
+ value: standalone
+ - name: SPRING_DATASOURCE_PLATFORM
+ value: mysql
+---
diff --git a/67-202508-雄安空能院/deploy/k8s-nfs-test.yaml b/67-202508-雄安空能院/deploy/k8s-nfs-test.yaml
new file mode 100644
index 0000000..4acc47b
--- /dev/null
+++ b/67-202508-雄安空能院/deploy/k8s-nfs-test.yaml
@@ -0,0 +1,38 @@
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: test-claim
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" #与nfs-StorageClass.yaml metadata.name保持一致
+spec:
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: nfs-prod-distribute
+ resources:
+ requests:
+ storage: 1Mi
+---
+kind: Pod
+apiVersion: v1
+metadata:
+ name: test-pod
+spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: test-pod
+ image: 192.168.0.2:8033/cmii/busybox:latest
+ command:
+ - "/bin/sh"
+ args:
+ - "-c"
+ - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
+ volumeMounts:
+ - name: nfs-pvc
+ mountPath: "/mnt"
+ restartPolicy: "Never"
+ volumes:
+ - name: nfs-pvc
+ persistentVolumeClaim:
+ claimName: test-claim #与PVC名称保持一致
diff --git a/67-202508-雄安空能院/deploy/k8s-nfs.yaml b/67-202508-雄安空能院/deploy/k8s-nfs.yaml
new file mode 100644
index 0000000..39ad7c4
--- /dev/null
+++ b/67-202508-雄安空能院/deploy/k8s-nfs.yaml
@@ -0,0 +1,114 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system #根据实际环境设定namespace,下面类同
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: nfs-client-provisioner-runner
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: run-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+# name: nfs-client-provisioner-runner
+ name: cluster-admin
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: Role
+ name: leader-locking-nfs-client-provisioner
+ apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: nfs-prod-distribute
+provisioner: cmlc-nfs-storage #这里的名称要和provisioner配置文件中的环境变量PROVISIONER_NAME保持一致 (NOTE(review): 'parameters: archiveOnDelete: "false"' was fused into this comment — if intended it must be a separate top-level 'parameters:' block; confirm)
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nfs-client-provisioner
+ labels:
+ app: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system #与RBAC文件中的namespace保持一致
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nfs-client-provisioner
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ app: nfs-client-provisioner
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: nfs-client-provisioner
+ containers:
+ - name: nfs-client-provisioner
+ image: 192.168.0.2:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
+ volumeMounts:
+ - name: nfs-client-root
+ mountPath: /persistentvolumes
+ env:
+ - name: PROVISIONER_NAME
+ value: cmlc-nfs-storage
+ - name: NFS_SERVER
+ value: 192.168.0.6
+ - name: NFS_PATH
+ value: /var/lib/docker/nfs_data
+ volumes:
+ - name: nfs-client-root
+ nfs:
+ server: 192.168.0.6
+ path: /var/lib/docker/nfs_data
diff --git a/67-202508-雄安空能院/deploy/k8s-pvc.yaml b/67-202508-雄安空能院/deploy/k8s-pvc.yaml
new file mode 100644
index 0000000..6cb103c
--- /dev/null
+++ b/67-202508-雄安空能院/deploy/k8s-pvc.yaml
@@ -0,0 +1,76 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: nfs-backend-log-pvc
+ namespace: xakny
+ labels:
+ cmii.type: middleware-base
+ cmii.app: nfs-backend-log-pvc
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: uas-2.1
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 100Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-emqxs
+ namespace: xakny
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-emqxs
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: uas-2.1
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 20Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-mongo
+ namespace: xakny
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-mongo
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: uas-2.1
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 30Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-rabbitmq
+ namespace: xakny
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-rabbitmq
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: uas-2.1
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 20Gi
diff --git a/67-202508-雄安空能院/deploy/k8s-rabbitmq.yaml b/67-202508-雄安空能院/deploy/k8s-rabbitmq.yaml
new file mode 100644
index 0000000..5324066
--- /dev/null
+++ b/67-202508-雄安空能院/deploy/k8s-rabbitmq.yaml
@@ -0,0 +1,328 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-rabbitmq
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: rabbitmq
+automountServiceAccountToken: true
+secrets:
+ - name: helm-rabbitmq
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-rabbitmq
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: rabbitmq
+type: Opaque
+data:
+ rabbitmq-password: "blljUk45MXIuX2hq"
+ rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-rabbitmq-config
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: rabbitmq
+data:
+ rabbitmq.conf: |-
+ ## Username and password
+ ##
+ default_user = admin
+ default_pass = nYcRN91r._hj
+ ## Clustering
+ ##
+ cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
+ cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
+ cluster_formation.node_cleanup.interval = 10
+ cluster_formation.node_cleanup.only_log_warning = true
+ cluster_partition_handling = autoheal
+ # queue master locator
+ queue_master_locator = min-masters
+ # enable guest user
+ loopback_users.guest = false
+ #default_vhost = default-vhost
+ #disk_free_limit.absolute = 50MB
+ #load_definitions = /app/load_definition.json
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-rabbitmq-endpoint-reader
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: rabbitmq
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-rabbitmq-endpoint-reader
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: rabbitmq
+subjects:
+ - kind: ServiceAccount
+ name: helm-rabbitmq
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: helm-rabbitmq-endpoint-reader
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-rabbitmq-headless
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ clusterIP: None
+ ports:
+ - name: epmd
+ port: 4369
+ targetPort: epmd
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+ - name: dist
+ port: 25672
+ targetPort: dist
+ - name: dashboard
+ port: 15672
+      targetPort: dashboard  # fixed: pod exposes no port named 'stats'; the 15672 container port is named 'dashboard'
+ selector:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: xakny
+ publishNotReadyAddresses: true
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-rabbitmq
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ type: NodePort
+ ports:
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+      nodePort: 35672  # NOTE(review): outside the default NodePort range (30000-32767); requires an extended kube-apiserver --service-node-port-range — confirm cluster config
+ - name: dashboard
+ port: 15672
+ targetPort: dashboard
+ nodePort: 36675
+ selector:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: xakny
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-rabbitmq
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ serviceName: helm-rabbitmq-headless
+ podManagementPolicy: OrderedReady
+ replicas: 1
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: xakny
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: rabbitmq
+ annotations:
+ checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
+ checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-rabbitmq
+ affinity: {}
+ securityContext:
+ fsGroup: 5001
+ runAsUser: 5001
+ terminationGracePeriodSeconds: 120
+ initContainers:
+ - name: volume-permissions
+ image: 192.168.0.2:8033/cmii/bitnami-shell:11-debian-11-r136
+ imagePullPolicy: "Always"
+ command:
+ - /bin/bash
+ args:
+ - -ec
+ - |
+ mkdir -p "/bitnami/rabbitmq/mnesia"
+ chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
+ securityContext:
+ runAsUser: 0
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: data
+ mountPath: /bitnami/rabbitmq/mnesia
+ containers:
+ - name: rabbitmq
+ image: 192.168.0.2:8033/cmii/rabbitmq:3.9.12-debian-10-r3
+ imagePullPolicy: "Always"
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: K8S_SERVICE_NAME
+ value: "helm-rabbitmq-headless"
+ - name: K8S_ADDRESS_TYPE
+ value: hostname
+ - name: RABBITMQ_FORCE_BOOT
+ value: "no"
+ - name: RABBITMQ_NODE_NAME
+ value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+ - name: K8S_HOSTNAME_SUFFIX
+ value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+ - name: RABBITMQ_MNESIA_DIR
+ value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
+ - name: RABBITMQ_LDAP_ENABLE
+ value: "no"
+ - name: RABBITMQ_LOGS
+ value: "-"
+ - name: RABBITMQ_ULIMIT_NOFILES
+ value: "65536"
+ - name: RABBITMQ_USE_LONGNAME
+ value: "true"
+ - name: RABBITMQ_ERL_COOKIE
+ valueFrom:
+ secretKeyRef:
+ name: helm-rabbitmq
+ key: rabbitmq-erlang-cookie
+ - name: RABBITMQ_LOAD_DEFINITIONS
+ value: "no"
+ - name: RABBITMQ_SECURE_PASSWORD
+ value: "yes"
+ - name: RABBITMQ_USERNAME
+ value: "admin"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-rabbitmq
+ key: rabbitmq-password
+ - name: RABBITMQ_PLUGINS
+ value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
+ ports:
+ - name: amqp
+ containerPort: 5672
+ - name: dist
+ containerPort: 25672
+ - name: dashboard
+ containerPort: 15672
+ - name: epmd
+ containerPort: 4369
+ livenessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q ping
+ initialDelaySeconds: 120
+ periodSeconds: 30
+ timeoutSeconds: 20
+ successThreshold: 1
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
+ /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
+ else
+ rabbitmqctl stop_app
+ fi
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: configuration
+ mountPath: /bitnami/rabbitmq/conf
+ - name: data
+ mountPath: /bitnami/rabbitmq/mnesia
+ volumes:
+ - name: configuration
+ configMap:
+ name: helm-rabbitmq-config
+ items:
+ - key: rabbitmq.conf
+ path: rabbitmq.conf
+ - name: data
+ persistentVolumeClaim:
+ claimName: helm-rabbitmq
diff --git a/67-202508-雄安空能院/deploy/k8s-redis.yaml b/67-202508-雄安空能院/deploy/k8s-redis.yaml
new file mode 100644
index 0000000..b3597a3
--- /dev/null
+++ b/67-202508-雄安空能院/deploy/k8s-redis.yaml
@@ -0,0 +1,593 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+automountServiceAccountToken: true
+metadata:
+ name: helm-redis
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-redis
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+type: Opaque
+data:
+ redis-password: "TWNhY2hlQDQ1MjI="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-configuration
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+data:
+ redis.conf: |-
+ # User-supplied common configuration:
+ # Enable AOF https://redis.io/topics/persistence#append-only-file
+ appendonly yes
+ # Disable RDB persistence, AOF persistence already enabled.
+ save ""
+ # End of common configuration
+ master.conf: |-
+ dir /data
+ # User-supplied master configuration:
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+ # End of master configuration
+ replica.conf: |-
+ dir /data
+ slave-read-only yes
+ # User-supplied replica configuration:
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+ # End of replica configuration
+---
+# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-health
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+data:
+ ping_readiness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+ ping_liveness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+---
+# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-scripts
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+data:
+ start-master.sh: |
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
+ exec redis-server "${ARGS[@]}"
+ start-replica.sh: |
+ #!/bin/bash
+
+ get_port() {
+ hostname="$1"
+ type="$2"
+
+ port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
+ port=${!port_var}
+
+ if [ -z "$port" ]; then
+ case $type in
+ "SENTINEL")
+ echo 26379
+ ;;
+ "REDIS")
+ echo 6379
+ ;;
+ esac
+ else
+ echo $port
+ fi
+ }
+
+ get_full_hostname() {
+ hostname="$1"
+ echo "${hostname}.${HEADLESS_SERVICE}"
+ }
+
+ REDISPORT=$(get_port "$HOSTNAME" "REDIS")
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+
+ echo "" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
+ exec redis-server "${ARGS[@]}"
+---
+# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-headless
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: xakny
+---
+# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-master
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+spec:
+ type: ClusterIP
+
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ nodePort: null
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: xakny
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+---
+# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-replicas
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+spec:
+ type: ClusterIP
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ nodePort: null
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/component: replica
+---
+# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-redis-master
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: xakny
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+ serviceName: helm-redis-headless
+ updateStrategy:
+ rollingUpdate: {}
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - xakny
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: helm-redis
+ imagePullSecrets:
+ - name: harborsecret
+ terminationGracePeriodSeconds: 30
+ containers:
+ - name: redis
+ image: 192.168.0.2:8033/cmii/redis:6.2.6-debian-10-r0
+ imagePullPolicy: "Always"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-master.sh
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: master
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ # One second longer than command timeout should prevent generation of zombie processes.
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local.sh 5
+ readinessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 2
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local.sh 1
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: "2"
+ memory: 8Gi
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ - name: redis-data
+ mountPath: /data
+          subPath: ""  # was a bare 'subPath:' (null) — made the empty value explicit; NOTE(review): confirm the key can simply be dropped
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc/
+ - name: tmp
+ mountPath: /tmp
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: helm-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: helm-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: helm-redis-configuration
+ - name: redis-tmp-conf
+ emptyDir: {}
+ - name: tmp
+ emptyDir: {}
+ - name: redis-data
+ emptyDir: {}
+---
+# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-redis-replicas
+ namespace: xakny
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+spec:
+ replicas: 0
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/component: replica
+ serviceName: helm-redis-headless
+ updateStrategy:
+ rollingUpdate: {}
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xakny
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: helm-redis
+ terminationGracePeriodSeconds: 30
+ containers:
+ - name: redis
+ image: 192.168.0.2:8033/cmii/redis:6.2.6-debian-10-r0
+ imagePullPolicy: "Always"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-replica.sh
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: slave
+ - name: REDIS_MASTER_HOST
+ value: helm-redis-master-0.helm-redis-headless.xakny.svc.cluster.local
+ - name: REDIS_MASTER_PORT_NUMBER
+ value: "6379"
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_MASTER_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local_and_master.sh 5
+ readinessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 2
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local_and_master.sh 1
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: "2"
+ memory: 8Gi
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ - name: redis-data
+ mountPath: /data
+          subPath: ""  # was a bare 'subPath:' (null) — made the empty value explicit; NOTE(review): confirm the key can simply be dropped
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: helm-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: helm-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: helm-redis-configuration
+ - name: redis-tmp-conf
+ emptyDir: {}
+ - name: redis-data
+ emptyDir: {}
+
diff --git a/67-202508-雄安空能院/deploy/k8s-srs.yaml b/67-202508-雄安空能院/deploy/k8s-srs.yaml
new file mode 100644
index 0000000..11b5d9c
--- /dev/null
+++ b/67-202508-雄安空能院/deploy/k8s-srs.yaml
@@ -0,0 +1,496 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: helm-live-srs-cm
+ namespace: xakny
+ labels:
+ cmii.app: live-srs
+ cmii.type: live
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ helm.sh/chart: cmlc-live-srs-rtc-2.0.0
+data:
+ srs.rtc.conf: |-
+ listen 31935;
+ max_connections 4096;
+ srs_log_tank console;
+ srs_log_level info;
+ srs_log_file /home/srs.log;
+ daemon off;
+ http_api {
+ enabled on;
+ listen 1985;
+ crossdomain on;
+ }
+ stats {
+ network 0;
+ }
+ http_server {
+ enabled on;
+ listen 8080;
+ dir /home/hls;
+ }
+ srt_server {
+ enabled on;
+ listen 30556;
+ maxbw 1000000000;
+ connect_timeout 4000;
+ peerlatency 600;
+ recvlatency 600;
+ }
+ rtc_server {
+ enabled on;
+ listen 30090;
+ candidate $CANDIDATE;
+ }
+ vhost __defaultVhost__ {
+ http_hooks {
+ enabled on;
+ on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
+ }
+ http_remux {
+ enabled on;
+ }
+ rtc {
+ enabled on;
+ rtmp_to_rtc on;
+ rtc_to_rtmp on;
+ keep_bframe off;
+ }
+ tcp_nodelay on;
+ min_latency on;
+ play {
+ gop_cache off;
+ mw_latency 100;
+ mw_msgs 10;
+ }
+ publish {
+ firstpkt_timeout 8000;
+ normal_timeout 4000;
+ mr on;
+ }
+ dvr {
+ enabled off;
+ dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
+ dvr_plan session;
+ }
+ hls {
+ enabled on;
+ hls_path /home/hls;
+ hls_fragment 10;
+ hls_window 60;
+ hls_m3u8_file [app]/[stream].m3u8;
+ hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
+ hls_cleanup on;
+ hls_entry_prefix http://111.63.69.71:8088;
+ }
+ }
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srs-svc-exporter
+ namespace: xakny
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: rtmp
+ protocol: TCP
+ port: 31935
+ targetPort: 31935
+ nodePort: 31935
+ - name: rtc
+ protocol: UDP
+ port: 30090
+ targetPort: 30090
+ nodePort: 30090
+ - name: rtc-tcp
+ protocol: TCP
+ port: 30090
+ targetPort: 30090
+ nodePort: 30090
+ - name: srt
+ protocol: UDP
+ port: 30556
+ targetPort: 30556
+ nodePort: 30556
+ - name: api
+ protocol: TCP
+ port: 1985
+ targetPort: 1985
+ nodePort: 30080
+ selector:
+ srs-role: rtc
+ type: NodePort
+ sessionAffinity: None
+ externalTrafficPolicy: Cluster
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srs-svc
+ namespace: xakny
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8080
+ targetPort: 8080
+ - name: api
+ protocol: TCP
+ port: 1985
+ targetPort: 1985
+ selector:
+ srs-role: rtc
+ type: ClusterIP
+ sessionAffinity: None
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srsrtc-svc
+ namespace: xakny
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: rtmp
+ protocol: TCP
+ port: 31935
+ targetPort: 31935
+ selector:
+ srs-role: rtc
+ type: ClusterIP
+ sessionAffinity: None
+
+---
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: helm-live-srs-rtc
+ namespace: xakny
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-srs
+ cmii.type: live
+ helm.sh/chart: cmlc-live-srs-rtc-2.0.0
+ srs-role: rtc
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ srs-role: rtc
+ template:
+ metadata:
+ labels:
+ srs-role: rtc
+ spec:
+ volumes:
+ - name: srs-conf-file
+ configMap:
+ name: helm-live-srs-cm
+ items:
+ - key: srs.rtc.conf
+ path: docker.conf
+ defaultMode: 420
+ - name: srs-vol
+ emptyDir:
+ sizeLimit: 8Gi
+ containers:
+ - name: srs-rtc
+ image: 192.168.0.2:8033/cmii/srs:v5.0.195
+ ports:
+ - name: srs-rtmp
+ containerPort: 31935
+ protocol: TCP
+ - name: srs-api
+ containerPort: 1985
+ protocol: TCP
+ - name: srs-flv
+ containerPort: 8080
+ protocol: TCP
+ - name: srs-webrtc
+ containerPort: 30090
+ protocol: UDP
+ - name: srs-webrtc-tcp
+ containerPort: 30090
+ protocol: TCP
+ - name: srs-srt
+ containerPort: 30556
+ protocol: UDP
+ env:
+ - name: CANDIDATE
+ value: 111.63.69.71
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-conf-file
+ mountPath: /usr/local/srs/conf/docker.conf
+ subPath: docker.conf
+ - name: srs-vol
+ mountPath: /home/dvr
+ subPath: xakny/helm-live/dvr
+ - name: srs-vol
+ mountPath: /home/hls
+ subPath: xakny/helm-live/hls
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ - name: oss-adaptor
+ image: 192.168.0.2:8033/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
+ env:
+ - name: OSS_ENDPOINT
+ value: 'http://helm-minio:9000'
+ - name: OSS_AK
+ value: cmii
+ - name: OSS_SK
+ value: 'B#923fC7mk'
+ - name: OSS_BUCKET
+ value: live-cluster-hls
+ - name: SRS_OP
+ value: 'http://helm-live-op-svc-v2:8080'
+ - name: MYSQL_ENDPOINT
+ value: 'helm-mysql:3306'
+ - name: MYSQL_USERNAME
+ value: k8s_admin
+ - name: MYSQL_PASSWORD
+ value: fP#UaH6qQ3)8
+ - name: MYSQL_DATABASE
+ value: cmii_live_srs_op
+ - name: MYSQL_TABLE
+ value: live_segment
+ - name: LOG_LEVEL
+ value: info
+ - name: OSS_META
+ value: 'yes'
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-vol
+ mountPath: /cmii/share/hls
+ subPath: xakny/helm-live/hls
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ schedulerName: default-scheduler
+ serviceName: helm-live-srsrtc-svc
+ podManagementPolicy: OrderedReady
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ partition: 0
+ revisionHistoryLimit: 10
+---
+# live-srs部分
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: helm-live-op-v2
+ namespace: xakny
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-engine
+ cmii.type: live
+ helm.sh/chart: cmlc-live-live-op-2.0.0
+ live-role: op-v2
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ live-role: op-v2
+ template:
+ metadata:
+ labels:
+ live-role: op-v2
+ spec:
+ volumes:
+ - name: srs-conf-file
+ configMap:
+ name: helm-live-op-cm-v2
+ items:
+ - key: live.op.conf
+ path: bootstrap.yaml
+ defaultMode: 420
+ containers:
+ - name: helm-live-op-v2
+ image: 192.168.0.2:8033/cmii/cmii-live-operator:5.2.0
+ ports:
+ - name: operator
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 4800m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-conf-file
+ mountPath: /cmii/bootstrap.yaml
+ subPath: bootstrap.yaml
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ schedulerName: default-scheduler
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 25%
+ maxSurge: 25%
+ revisionHistoryLimit: 10
+ progressDeadlineSeconds: 600
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-op-svc-v2
+ namespace: xakny
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ nodePort: 30333
+ selector:
+ live-role: op-v2
+ type: NodePort
+ sessionAffinity: None
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-op-svc
+ namespace: xakny
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ live-role: op
+ type: ClusterIP
+ sessionAffinity: None
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: helm-live-op-cm-v2
+ namespace: xakny
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-engine
+ cmii.type: live
+data:
+ live.op.conf: |-
+ server:
+ port: 8080
+ spring:
+ main:
+ allow-bean-definition-overriding: true
+ allow-circular-references: true
+ application:
+ name: cmii-live-operator
+ platform:
+ info:
+ name: cmii-live-operator
+ description: cmii-live-operator
+ version: uas-2.1
+ scanPackage: com.cmii.live.op
+ cloud:
+ nacos:
+ config:
+ username: nacos
+ password: KingKong@95461234
+ server-addr: helm-nacos:8848
+ extension-configs:
+ - data-id: cmii-live-operator.yml
+ group: uas-2.1
+ refresh: true
+ shared-configs:
+ - data-id: cmii-backend-system.yml
+ group: uas-2.1
+ refresh: true
+ discovery:
+ enabled: false
+
+ live:
+ engine:
+ type: srs
+ endpoint: 'http://helm-live-srs-svc:1985'
+ proto:
+ rtmp: 'rtmp://111.63.69.71:31935'
+ rtsp: 'rtsp://111.63.69.71:30554'
+ srt: 'srt://111.63.69.71:30556'
+ flv: 'http://111.63.69.71:30500'
+ hls: 'http://111.63.69.71:30500'
+ rtc: 'webrtc://111.63.69.71:30080'
+ replay: 'https://111.63.69.71:30333'
+ minio:
+ endpoint: http://helm-minio:9000
+ access-key: cmii
+ secret-key: B#923fC7mk
+ bucket: live-cluster-hls
diff --git a/67-202508-雄安空能院/disk.sh b/67-202508-雄安空能院/disk.sh
new file mode 100644
index 0000000..8199307
--- /dev/null
+++ b/67-202508-雄安空能院/disk.sh
@@ -0,0 +1,63 @@
+#! /bin/bash
+
+yum install lvm2 -y   # was missing the package-manager command ('install' alone is not a valid command)
+echo ""
+echo ""
+echo ""
+echo "-----------------------------------------------------------------------"
+
+export VG_NAME=datavg
+
+echo "n
+p
+
+
+
+t
+
+8e
+w
+" | fdisk /dev/vdb
+partprobe
+# 如果已经存在卷组,直接进行添加
+# vgextend rootvg /dev/sdc1
+vgcreate ${VG_NAME} /dev/vdb1
+export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
+# 大小根据实际情况调整
+lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
+mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
+#mkfs.ext4 /dev/mapper/${VG_NAME}-lvdata
+mkdir -p /home/app-plus
+#mkdir -p /var/lib/docker
+#selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
+export selffstab="/dev/mapper/${VG_NAME}-lvdata /home/app-plus xfs defaults 0 0"
+echo "${selffstab}" >> /etc/fstab
+mount -a
+
+echo ""
+echo ""
+echo ""
+df -TH
+echo "-----------------------------------------------------------------------"
+# NOTE(review): removed stray 's' — a leftover keystroke that would fail as a command
+# 扩容根目录,${VG_NAME}-root 通过df -Th获取需要扩容的文件系统
+# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
+# xfs_growfs /dev/mapper/${VG_NAME}-root
+
+# custom section: extend the root volume group (lvm2 installed above)
+echo "n
+p
+
+
+
+t
+
+8e
+w
+" | fdisk /dev/vda
+partprobe
+vgextend klas_host-10-190-202-141 /dev/vda4
+lvextend -l +100%FREE /dev/mapper/klas_host--10--190--202--141-root
+partprobe
+xfs_growfs /dev/mapper/klas_host--10--190--202--141-root
+df -TH
\ No newline at end of file
diff --git a/67-202508-雄安空能院/doris-deploy/doris-be-configmap.yaml b/67-202508-雄安空能院/doris-deploy/doris-be-configmap.yaml
new file mode 100644
index 0000000..6e75247
--- /dev/null
+++ b/67-202508-雄安空能院/doris-deploy/doris-be-configmap.yaml
@@ -0,0 +1,82 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: doris-cluster-be-conf
+ namespace: xakny
+ labels:
+ app.kubernetes.io/component: be
+data:
+  be.conf: |
+ CUR_DATE=`date +%Y%m%d-%H%M%S`
+
+ # Log dir
+ LOG_DIR="${DORIS_HOME}/log/"
+
+ # For jdk 8
+ JAVA_OPTS="-Dfile.encoding=UTF-8 -Xmx2048m -DlogPath=$LOG_DIR/jni.log -Xloggc:$LOG_DIR/be.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.security.krb5.debug=true -Dsun.java.command=DorisBE -XX:-CriticalJNINatives"
+
+ # Set your own JAVA_HOME
+ # JAVA_HOME=/path/to/jdk/
+
+ # https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
+ # https://jemalloc.net/jemalloc.3.html jemalloc 内存分配器设置参数
+ JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:15000,dirty_decay_ms:15000,oversize_threshold:0,prof:false,lg_prof_interval:32,lg_prof_sample:19,prof_gdump:false,prof_accum:false,prof_leak:false,prof_final:false"
+ JEMALLOC_PROF_PRFIX=""
+
+ # ports for admin, web, heartbeat service
+ be_port = 9060
+ webserver_port = 8040
+ heartbeat_service_port = 9050
+ brpc_port = 8060
+ arrow_flight_sql_port = -1
+
+ # HTTPS configures
+ enable_https = false
+ # path of certificate in PEM format.
+ #ssl_certificate_path = "$DORIS_HOME/conf/cert.pem"
+ # path of private key in PEM format.
+ #ssl_private_key_path = "$DORIS_HOME/conf/key.pem"
+
+ # Choose one if there are more than one ip except loopback address.
+ # Note that there should at most one ip match this list.
+ # If no ip match this rule, will choose one randomly.
+ # use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
+ # Default value is empty.
+ # priority_networks = 10.10.10.0/24;192.168.0.0/16
+
+ # data root path, separate by ';'
+ # You can specify the storage type for each root path, HDD (cold data) or SSD (hot data)
+ # eg:
+ # storage_root_path = /home/disk1/doris;/home/disk2/doris;/home/disk2/doris
+ # storage_root_path = /home/disk1/doris,medium:SSD;/home/disk2/doris,medium:SSD;/home/disk2/doris,medium:HDD
+ # /home/disk2/doris,medium:HDD(default)
+ #
+ # you also can specify the properties by setting ':', separate by ','
+ # property 'medium' has a higher priority than the extension of path
+ #
+ # Default value is ${DORIS_HOME}/storage, you should create it by hand.
+ # storage_root_path = ${DORIS_HOME}/storage
+
+ # Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
+ # jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
+
+ # Advanced configurations
+ # INFO, WARNING, ERROR, FATAL
+ sys_log_level = INFO
+ # sys_log_roll_mode = SIZE-MB-1024
+ # sys_log_roll_num = 10
+ # sys_log_verbose_modules = *
+ # log_buffer_level = -1
+
+ # aws sdk log level
+ # Off = 0,
+ # Fatal = 1,
+ # Error = 2,
+ # Warn = 3,
+ # Info = 4,
+ # Debug = 5,
+ # Trace = 6
+ # Default to turn off aws sdk log, because aws sdk errors that need to be cared will be output through Doris logs
+ #aws_log_level=0
+ ## If you are not running in aws cloud, you can disable EC2 metadata
+ #AWS_EC2_METADATA_DISABLED=false
\ No newline at end of file
diff --git a/67-202508-雄安空能院/doris-deploy/doris-be-internal-service.yaml b/67-202508-雄安空能院/doris-deploy/doris-be-internal-service.yaml
new file mode 100644
index 0000000..ad9f1b2
--- /dev/null
+++ b/67-202508-雄安空能院/doris-deploy/doris-be-internal-service.yaml
@@ -0,0 +1,17 @@
+kind: Service
+apiVersion: v1
+metadata:
+ namespace: xakny
+ name: doris-cluster-be-internal
+ labels:
+ app.kubernetes.io/component: doris-cluster-be-internal
+spec:
+ ports:
+ - name: heartbeat-port
+ protocol: TCP
+ port: 9050
+ targetPort: 9050
+ selector:
+ app.kubernetes.io/component: doris-cluster-be
+ clusterIP: None
+ type: ClusterIP
\ No newline at end of file
diff --git a/67-202508-雄安空能院/doris-deploy/doris-be-service.yaml b/67-202508-雄安空能院/doris-deploy/doris-be-service.yaml
new file mode 100644
index 0000000..a63a3b2
--- /dev/null
+++ b/67-202508-雄安空能院/doris-deploy/doris-be-service.yaml
@@ -0,0 +1,32 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: doris-cluster-be-service
+ namespace: xakny
+ labels:
+ app.kubernetes.io/component: doris-cluster-be
+spec:
+ ports:
+ - name: be-port
+ protocol: TCP
+ port: 9060
+ targetPort: 9060
+ nodePort: 32189
+ - name: webserver-port
+ protocol: TCP
+ port: 8040
+ targetPort: 8040
+ nodePort: 31624
+ - name: heartbeat-port
+ protocol: TCP
+ port: 9050
+ targetPort: 9050
+ nodePort: 31625
+ - name: brpc-port
+ protocol: TCP
+ port: 8060
+ targetPort: 8060
+ nodePort: 31627
+ selector:
+ app.kubernetes.io/component: doris-cluster-be
+ type: NodePort
\ No newline at end of file
diff --git a/67-202508-雄安空能院/doris-deploy/doris-be-statusfulset.yaml b/67-202508-雄安空能院/doris-deploy/doris-be-statusfulset.yaml
new file mode 100644
index 0000000..51dc30d
--- /dev/null
+++ b/67-202508-雄安空能院/doris-deploy/doris-be-statusfulset.yaml
@@ -0,0 +1,214 @@
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: doris-cluster-be
+ namespace: xakny
+ labels:
+ app.kubernetes.io/component: doris-cluster-be
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: doris-cluster-be
+ template:
+ metadata:
+ name: doris-cluster-be
+ labels:
+ app.kubernetes.io/component: doris-cluster-be
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ volumes:
+ - name: podinfo
+ downwardAPI:
+ items:
+ - path: labels
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.labels
+ - path: annotations
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.annotations
+ defaultMode: 420
+ - name: doris-cluster-be-conf
+ configMap:
+ name: doris-cluster-be-conf
+ defaultMode: 420
+ - name: be-storage
+ persistentVolumeClaim:
+ claimName: doris-be-storage-pvc
+ - name: be-log
+ persistentVolumeClaim:
+            claimName: doris-be-log-pvc  # was doris-fe-log-pvc — BE logs must use the BE log PVC defined in doris-pvc.yaml, not share the FE log PVC
+ initContainers:
+ - name: default-init
+ image: '192.168.0.2:8033/cmii/alpine:1.0.0'
+ command:
+ - /bin/sh
+ args:
+ - '-c'
+ - sysctl -w vm.max_map_count=2000000 && swapoff -a
+ resources:
+ limits:
+ cpu: '1'
+ memory: 1Gi
+ requests:
+ cpu: '0.5'
+ memory: 500Mi
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ containers:
+ - name: be
+ image: '192.168.0.2:8033/cmii/doris.be-ubuntu:2.1.6'
+ command:
+ - /opt/apache-doris/be_entrypoint.sh
+ args:
+ - $(ENV_FE_ADDR)
+ ports:
+ - name: be-port
+ containerPort: 9060
+ protocol: TCP
+ - name: webserver-port
+ containerPort: 8040
+ protocol: TCP
+ - name: heartbeat-port
+ containerPort: 9050
+ protocol: TCP
+ - name: brpc-port
+ containerPort: 8060
+ protocol: TCP
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: CONFIGMAP_MOUNT_PATH
+ value: /etc/doris
+ - name: USER
+ value: root
+ - name: DORIS_ROOT
+ value: /opt/apache-doris
+ - name: ENV_FE_ADDR
+ value: doris-cluster-fe-service
+ - name: FE_QUERY_PORT
+ value: '9030'
+ resources:
+ limits:
+ cpu: '8'
+ memory: 8Gi
+ requests:
+ cpu: '4'
+ memory: 4Gi
+ volumeMounts:
+ - name: podinfo
+ mountPath: /etc/podinfo
+ - name: be-storage
+ mountPath: /opt/apache-doris/be/storage
+ - name: be-log
+ mountPath: /opt/apache-doris/be/log
+ - name: doris-cluster-be-conf
+ mountPath: /etc/doris
+ livenessProbe:
+ tcpSocket:
+ port: 9050
+ initialDelaySeconds: 80
+ timeoutSeconds: 180
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /api/health
+ port: 8040
+ scheme: HTTP
+ timeoutSeconds: 1
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ tcpSocket:
+ port: 9050
+ timeoutSeconds: 1
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 60
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /opt/apache-doris/be_prestop.sh
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: IfNotPresent
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: doris.cluster
+ operator: In
+ values:
+ - "true"
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/component
+ operator: In
+ values:
+ - doris-cluster-be
+ topologyKey: kubernetes.io/hostname
+ schedulerName: default-scheduler
+# volumeClaimTemplates:
+# - kind: PersistentVolumeClaim
+# apiVersion: v1
+# metadata:
+# name: be-storage
+# spec:
+# accessModes:
+# - ReadWriteOnce
+# resources:
+# requests:
+# storage: '10'
+# storageClassName: nfs-prod-distribute
+# volumeMode: Filesystem
+# - kind: PersistentVolumeClaim
+# apiVersion: v1
+# metadata:
+# name: be-log
+# spec:
+# accessModes:
+# - ReadWriteOnce
+# resources:
+# requests:
+# storage: '10'
+# storageClassName: nfs-prod-distribute
+# volumeMode: Filesystem
+ serviceName: doris-cluster-be-internal
+ podManagementPolicy: Parallel
\ No newline at end of file
diff --git a/67-202508-雄安空能院/doris-deploy/doris-fe-configmap.yaml b/67-202508-雄安空能院/doris-deploy/doris-fe-configmap.yaml
new file mode 100644
index 0000000..7cea328
--- /dev/null
+++ b/67-202508-雄安空能院/doris-deploy/doris-fe-configmap.yaml
@@ -0,0 +1,67 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: doris-cluster-fe-conf
+ namespace: xakny
+ labels:
+ app.kubernetes.io/component: fe
+data:
+ fe.conf: |
+ #####################################################################
+ ## The uppercase properties are read and exported by bin/start_fe.sh.
+ ## To see all Frontend configurations,
+ ## see fe/src/org/apache/doris/common/Config.java
+ #####################################################################
+
+ CUR_DATE=`date +%Y%m%d-%H%M%S`
+
+ # Log dir
+ LOG_DIR = ${DORIS_HOME}/log
+
+ # For jdk 8
+ JAVA_OPTS="-Dfile.encoding=UTF-8 -Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:$LOG_DIR/log/fe.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Dlog4j2.formatMsgNoLookups=true"
+
+ # Set your own JAVA_HOME
+ # JAVA_HOME=/path/to/jdk/
+
+ ##
+ ## the lowercase properties are read by main program.
+ ##
+
+ # store metadata, must be created before start FE.
+ # Default value is ${DORIS_HOME}/doris-meta
+ # meta_dir = ${DORIS_HOME}/doris-meta
+
+ # Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
+ # jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
+
+ http_port = 8030
+ rpc_port = 9020
+ query_port = 9030
+ edit_log_port = 9010
+ arrow_flight_sql_port = -1
+
+ # Choose one if there are more than one ip except loopback address.
+ # Note that there should at most one ip match this list.
+ # If no ip match this rule, will choose one randomly.
+ # use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
+ # Default value is empty.
+ # priority_networks = 10.10.10.0/24;192.168.0.0/16
+
+ # Advanced configurations
+ # log_roll_size_mb = 1024
+ # INFO, WARN, ERROR, FATAL
+ sys_log_level = INFO
+ # NORMAL, BRIEF, ASYNC,FE 日志的输出模式,其中 NORMAL 为默认的输出模式,日志同步输出且包含位置信息。ASYNC 默认是日志异步输出且包含位置信息。 BRIEF 模式是日志异步输出但不包含位置信息。三种日志输出模式的性能依次递增
+ sys_log_mode = ASYNC
+ # sys_log_roll_num = 10
+ # sys_log_verbose_modules = org.apache.doris
+ # audit_log_dir = $LOG_DIR
+ # audit_log_modules = slow_query, query
+ # audit_log_roll_num = 10
+ # meta_delay_toleration_second = 10
+ # qe_max_connection = 1024
+ # qe_query_timeout_second = 300
+ # qe_slow_log_ms = 5000
+ #Fully Qualified Domain Name,完全限定域名,开启后各节点之间通信基于FQDN
+ enable_fqdn_mode = true
\ No newline at end of file
diff --git a/67-202508-雄安空能院/doris-deploy/doris-fe-internal-service.yaml b/67-202508-雄安空能院/doris-deploy/doris-fe-internal-service.yaml
new file mode 100644
index 0000000..50a7b2b
--- /dev/null
+++ b/67-202508-雄安空能院/doris-deploy/doris-fe-internal-service.yaml
@@ -0,0 +1,17 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: doris-cluster-fe-internal
+ namespace: xakny
+ labels:
+ app.kubernetes.io/component: doris-cluster-fe
+spec:
+ ports:
+ - name: query-port
+ protocol: TCP
+ port: 9030
+ targetPort: 9030
+ selector:
+ app.kubernetes.io/component: doris-cluster-fe
+ clusterIP: None
+ type: ClusterIP
\ No newline at end of file
diff --git a/67-202508-雄安空能院/doris-deploy/doris-fe-service.yaml b/67-202508-雄安空能院/doris-deploy/doris-fe-service.yaml
new file mode 100644
index 0000000..d76f541
--- /dev/null
+++ b/67-202508-雄安空能院/doris-deploy/doris-fe-service.yaml
@@ -0,0 +1,32 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: doris-cluster-fe-service
+ namespace: xakny
+ labels:
+ app.kubernetes.io/component: doris-cluster-fe
+spec:
+ ports:
+ - name: http-port
+ protocol: TCP
+ port: 8030
+ targetPort: 8030
+ nodePort: 31620
+ - name: rpc-port
+ protocol: TCP
+ port: 9020
+ targetPort: 9020
+ nodePort: 31621
+ - name: query-port
+ protocol: TCP
+ port: 9030
+ targetPort: 9030
+ nodePort: 31622
+ - name: edit-log-port
+ protocol: TCP
+ port: 9010
+ targetPort: 9010
+ nodePort: 31623
+ selector:
+ app.kubernetes.io/component: doris-cluster-fe
+ type: NodePort
\ No newline at end of file
diff --git a/67-202508-雄安空能院/doris-deploy/doris-fe-statusfulset.yaml b/67-202508-雄安空能院/doris-deploy/doris-fe-statusfulset.yaml
new file mode 100644
index 0000000..8fa5548
--- /dev/null
+++ b/67-202508-雄安空能院/doris-deploy/doris-fe-statusfulset.yaml
@@ -0,0 +1,198 @@
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: doris-cluster-fe
+ namespace: xakny
+ labels:
+ app.kubernetes.io/component: doris-cluster-fe
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: doris-cluster-fe
+ template:
+ metadata:
+ name: doris-cluster-fe
+ labels:
+ app.kubernetes.io/component: doris-cluster-fe
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ volumes:
+ - name: meta
+ persistentVolumeClaim:
+# claimName: meta
+ claimName: doris-fe-meta-pvc
+ - name: log
+ persistentVolumeClaim:
+ # claimName: meta
+ claimName: doris-fe-log-pvc
+ - name: podinfo
+ downwardAPI:
+ items:
+ - path: labels
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.labels
+ - path: annotations
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.annotations
+ defaultMode: 420
+ - name: doris-cluster-fe-conf
+ configMap:
+ name: doris-cluster-fe-conf
+ defaultMode: 420
+ containers:
+ - name: doris-cluster-fe
+ image: '192.168.0.2:8033/cmii/doris.fe-ubuntu:2.1.6'
+ command:
+ - /opt/apache-doris/fe_entrypoint.sh
+ args:
+ - $(ENV_FE_ADDR)
+ ports:
+ - name: http-port
+ containerPort: 8030
+ protocol: TCP
+ - name: rpc-port
+ containerPort: 9020
+ protocol: TCP
+ - name: query-port
+ containerPort: 9030
+ protocol: TCP
+ - name: edit-log-port
+ containerPort: 9010
+ protocol: TCP
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.hostIP
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: CONFIGMAP_MOUNT_PATH
+ value: /etc/doris
+ - name: USER
+ value: root
+ - name: DORIS_ROOT
+ value: /opt/apache-doris
+ - name: ENV_FE_ADDR
+ value: doris-cluster-fe-service
+ - name: FE_QUERY_PORT
+ value: '9030'
+ - name: ELECT_NUMBER
+ value: '3'
+ resources:
+ limits:
+ cpu: '4'
+ memory: 8Gi
+ requests:
+ cpu: '2'
+ memory: 4Gi
+ volumeMounts:
+ - name: podinfo
+ mountPath: /etc/podinfo
+ - name: log
+ mountPath: /opt/apache-doris/fe/log
+ - name: meta
+ mountPath: /opt/apache-doris/fe/doris-meta
+ - name: doris-cluster-fe-conf
+ mountPath: /etc/doris
+ livenessProbe:
+ tcpSocket:
+ port: 9030
+ initialDelaySeconds: 80
+ timeoutSeconds: 180
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /api/health
+ port: 8030
+ scheme: HTTP
+ timeoutSeconds: 1
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ tcpSocket:
+ port: 9030
+ timeoutSeconds: 1
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 60
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /opt/apache-doris/fe_prestop.sh
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: IfNotPresent
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: doris.cluster
+ operator: In
+ values:
+ - "true"
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/component
+ operator: In
+ values:
+ - doris-cluster-fe
+ topologyKey: kubernetes.io/hostname
+ schedulerName: default-scheduler
+# volumeClaimTemplates:
+# - kind: PersistentVolumeClaim
+# apiVersion: v1
+# metadata:
+# name: meta
+# spec:
+# accessModes:
+# - ReadWriteOnce
+# resources:
+# requests:
+# storage: 10G
+# storageClassName: hcms-efs-class
+# volumeMode: Filesystem
+# - kind: PersistentVolumeClaim
+# apiVersion: v1
+# metadata:
+# name: log
+# spec:
+# accessModes:
+# - ReadWriteOnce
+# resources:
+# requests:
+# storage: '10'
+# storageClassName: hcms-efs-class
+# volumeMode: Filesystem
+ serviceName: doris-cluster-fe-internal
+ podManagementPolicy: Parallel
\ No newline at end of file
diff --git a/67-202508-雄安空能院/doris-deploy/doris-pvc.yaml b/67-202508-雄安空能院/doris-deploy/doris-pvc.yaml
new file mode 100644
index 0000000..c4dd4b2
--- /dev/null
+++ b/67-202508-雄安空能院/doris-deploy/doris-pvc.yaml
@@ -0,0 +1,60 @@
+---
+# pvc.yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: doris-fe-meta-pvc
+ namespace: xakny
+spec:
+ storageClassName: nfs-prod-distribute
+ volumeMode: Filesystem
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 100Gi
+
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: doris-fe-log-pvc
+ namespace: xakny
+spec:
+ storageClassName: nfs-prod-distribute
+ volumeMode: Filesystem
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 100Gi
+
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: doris-be-storage-pvc
+ namespace: xakny
+spec:
+ storageClassName: nfs-prod-distribute
+ volumeMode: Filesystem
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 500Gi # 根据实际存储需求调整
+
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: doris-be-log-pvc
+ namespace: xakny
+spec:
+ storageClassName: nfs-prod-distribute
+ volumeMode: Filesystem
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 100Gi
\ No newline at end of file
diff --git a/67-202508-雄安空能院/doris-deploy/修改pvc-然后statefulset中的image.txt b/67-202508-雄安空能院/doris-deploy/修改pvc-然后statefulset中的image.txt
new file mode 100644
index 0000000..f861307
--- /dev/null
+++ b/67-202508-雄安空能院/doris-deploy/修改pvc-然后statefulset中的image.txt
@@ -0,0 +1,5 @@
+
+
+修改PVC文件
+修改全部的NAMESPACE
+修改statefulset里面的IMAGE
\ No newline at end of file
diff --git a/67-202508-雄安空能院/helm-minio.yaml b/67-202508-雄安空能院/helm-minio.yaml
new file mode 100644
index 0000000..4dc0775
--- /dev/null
+++ b/67-202508-雄安空能院/helm-minio.yaml
@@ -0,0 +1,79 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ namespace: xakny
+ name: helm-minio
+spec:
+ serviceName: helm-minio
+ replicas: 1
+ selector:
+ matchLabels:
+ app: helm-minio
+ template:
+ metadata:
+ labels:
+ app: helm-minio
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: minio.node
+ operator: In
+ values:
+ - "true"
+ containers:
+ - name: minio
+ image: 192.168.0.2:8033/cmii/minio:RELEASE.2023-06-02T23-17-26Z
+ command: ["/bin/sh", "-c"]
+ args:
+ - minio server /data --console-address ":9001"
+ ports:
+ - containerPort: 9000
+ name: api
+ - containerPort: 9001
+ name: console
+ env:
+ - name: MINIO_ACCESS_KEY
+ value: "cmii"
+ - name: MINIO_SECRET_KEY
+ value: "B#923fC7mk"
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ volumes:
+ - name: data
+# persistentVolumeClaim:
+# claimName: helm-minio
+ hostPath:
+ path: /var/lib/docker/minio-pv/xakny/
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-minio
+ namespace: xakny
+spec:
+ selector:
+ app: helm-minio
+ ports:
+ - name: api
+ port: 9000
+ targetPort: 9000
+ nodePort: 39000
+ - name: console
+ port: 9001
+ targetPort: 9001
+ nodePort: 39001
+ type: NodePort
diff --git a/67-202508-雄安空能院/x_minio初始化.sh b/67-202508-雄安空能院/x_minio初始化.sh
new file mode 100644
index 0000000..a6cb6b1
--- /dev/null
+++ b/67-202508-雄安空能院/x_minio初始化.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+export tenant_name=outside
+export inner_master_ip=192.168.0.2
+export minio_host_ip=192.168.0.2
+
+mc alias set ${tenant_name} http://${minio_host_ip}:39000 cmii B#923fC7mk
+
+
+mc mb ${tenant_name}/jadenq ${tenant_name}/tus ${tenant_name}/thumbnail ${tenant_name}/pub-cms ${tenant_name}/live-srs-hls/ ${tenant_name}/mission/ ${tenant_name}/surveillance ${tenant_name}/playback ${tenant_name}/tower ${tenant_name}/modelprocess ${tenant_name}/srs-hls ${tenant_name}/live-cluster-hls ${tenant_name}/geodata ${tenant_name}/ilm-detect ${tenant_name}/ilm-geodata
+echo ""
+
+echo "set rabbit mq"
+mc admin config set ${tenant_name} notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@${inner_master_ip}:35672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"
+echo ""
+
+echo "sleep 5 s!"
+sleep 5
+
+mc admin service restart ${tenant_name}
+
+echo "sleep 5 s!"
+sleep 5
+echo ""
+
+
+echo "start to add event notification !"
+
+mc event add ${tenant_name}/mission arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/modelprocess arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/live-srs-hls arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/playback arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/live-cluster-hls arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/geodata arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/surveillance arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/ilm-detect arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/ilm-geodata arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/tus arn:minio:sqs::1:amqp --event delete
+
+mc ilm add --expiry-days "1" ${tenant_name}/tus
+
+echo ""
+echo "done of init !"
\ No newline at end of file
diff --git a/67-202508-雄安空能院/重要备份.sh b/67-202508-雄安空能院/重要备份.sh
new file mode 100644
index 0000000..a7eab33
--- /dev/null
+++ b/67-202508-雄安空能院/重要备份.sh
@@ -0,0 +1,3 @@
+
+
+%}3}vbJXWv
\ No newline at end of file
diff --git a/998-常用脚本/a-Agent-WDD运行/b-联网-docker安装.sh b/998-常用脚本/a-Agent-WDD运行/b-联网-docker安装.sh
new file mode 100644
index 0000000..f05fe68
--- /dev/null
+++ b/998-常用脚本/a-Agent-WDD运行/b-联网-docker安装.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+set -eo pipefail
+
+# 定义脚本参数
+DOCKER_VERSION="20.10" # 在这里修改期望的版本
+UBUNTU_IDS=("18.04" "20.04" "22.04" "24.04")
+ALIYUN_MIRROR="https://mirrors.aliyun.com"
+DOCKER_COMPOSE_VERSION="2.26.1"
+
+# 1. 检测Ubuntu环境
+check_ubuntu() {
+ if ! command -v lsb_release &> /dev/null || [[ $(lsb_release -is) != "Ubuntu" ]]; then
+ echo "错误:本脚本仅支持Ubuntu系统"
+ exit 1
+ fi
+
+ local version_id=$(lsb_release -rs)
+ if [[ ! " ${UBUNTU_IDS[*]} " =~ " ${version_id} " ]]; then
+ echo "错误:不支持的Ubuntu版本 ${version_id},支持版本:${UBUNTU_IDS[*]}"
+ exit 1
+ fi
+}
+
+# 2. 替换阿里云源
+set_aliyun_mirror() {
+ sudo sed -i "s/archive.ubuntu.com/mirrors.aliyun.com/g" /etc/apt/sources.list
+ sudo sed -i "s/security.ubuntu.com/mirrors.aliyun.com/g" /etc/apt/sources.list
+ sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates
+}
+
+# 3. 准备Docker仓库
+prepare_docker_env() {
+ sudo mkdir -p /etc/apt/keyrings
+ curl -fsSL $ALIYUN_MIRROR/docker-ce/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
+
+ local codename=$(lsb_release -cs)
+ echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] $ALIYUN_MIRROR/docker-ce/linux/ubuntu $codename stable" | \
+ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+ sudo apt-get update
+}
+
+# 4. 版本解析优化版本
+get_docker_version() {
+ local target_version=""
+ if [[ $DOCKER_VERSION =~ ^[0-9]+\.[0-9]+$ ]]; then
+ # 提取大版本下最高小版本
+ target_version=$(apt-cache madison docker-ce \
+ | awk -F'|' '{gsub(/ /,"",$2); print $2}' \
+ | grep -E "^[0-9]+:${DOCKER_VERSION}([.-]|\~\w+)" \
+ | sort -rV \
+ | head -1)
+ elif [[ $DOCKER_VERSION =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
+ # 精确版本匹配
+ target_version=$(apt-cache madison docker-ce \
+ | awk -F'|' '{gsub(/ /,"",$2); print $2}' \
+ | grep -E "^[0-9]+:${DOCKER_VERSION}.*$(lsb_release -cs)" )
+ fi
+
+ [ -z "$target_version" ] && echo "错误:找不到Docker版本 $DOCKER_VERSION" && exit 1
+ echo "$target_version" | sed 's/^[0-9]+://' # 去除前缀
+}
+
+# 5. 主流程
+main() {
+ check_ubuntu
+ echo "-- 设置阿里云源 --"
+ set_aliyun_mirror
+
+ echo "-- 准备Docker仓库 --"
+ prepare_docker_env
+
+ echo "-- 解析Docker版本 --"
+ local full_version=$(get_docker_version)
+ echo "选择版本:$full_version"
+
+ echo "-- 安装组件 --"
+ sudo apt-get install -y \
+ docker-ce-cli="$full_version" \
+ docker-ce="$full_version" \
+ docker-ce-rootless-extras="$full_version" \
+ containerd.io \
+ docker-buildx-plugin \
+ docker-compose-plugin
+
+ echo "-- 安装docker-compose --"
+ sudo curl -sSL "https://get.daocloud.io/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m`" -o /usr/local/bin/docker-compose
+ sudo chmod +x /usr/local/bin/docker-compose
+
+ echo "-- 禁用自动更新 --"
+ sudo apt-mark hold docker-ce docker-ce-cli containerd.io
+
+ echo "-- 启动服务 --"
+ sudo systemctl enable docker && sudo systemctl start docker
+
+ echo -e "\n=== 安装完成 ==="
+ docker --version
+ docker-compose --version
+}
+
+main
+
+
+
+
+# 请写一个shell,基于上述的部分安装逻辑,实现如下的功能
+# 脚本前面提取变量 docker的版本号 20.10.15 或 20.10(安装小版本最高的版本)
+# 1. 检测当前主机是否是ubuntu环境,本脚本仅支持Ubuntu
+# 2. 获取本机的版本号,支持ubuntu18.04 20.04 22.04 24.04的版本
+# 3. 根据ubuntu版本修改,apt的镜像源为阿里源
+# 4. 在线安装符合变量版本的docker,在线安装docker-compose,安装常用的插件
+# 5. 禁止docker自动更新
diff --git a/998-常用脚本/a-Agent-WDD运行/啊-批量命令.sh b/998-常用脚本/a-Agent-WDD运行/啊-批量命令.sh
index 55db920..32c081f 100644
--- a/998-常用脚本/a-Agent-WDD运行/啊-批量命令.sh
+++ b/998-常用脚本/a-Agent-WDD运行/啊-批量命令.sh
@@ -28,30 +28,16 @@ cat /usr/local/etc/wdd/agent-wdd-config.yaml
/usr/local/bin/agent-wdd base harbor install
+# 主节点执行
+# 安装octopus-agent
+mv agent-wdd_linux_amd64 /usr/local/bin/agent-wdd
+chmod +x /usr/local/bin/agent-wdd
+
+# 主节点安装ssh-key
+/usr/local/bin/agent-wdd base ssh config
+/usr/local/bin/agent-wdd base ssh key
+
# 批量执行命令
-
-host_list=(
- 172.16.100.56
- 172.16.100.57
- 172.16.100.58
-)
-
-
-host_list=(
- 172.16.100.62
- 172.16.100.51
- 172.16.100.52
- 172.16.100.53
- 172.16.100.54
- 172.16.100.55
- 172.16.100.56
- 172.16.100.57
- 172.16.100.58
- 172.16.100.59
- 172.16.100.60
- 172.16.100.61
-)
-
host_list=(
172.16.100.56
172.16.100.57
@@ -63,6 +49,9 @@ for server in "${host_list[@]}";do
echo ""
done
+
+# 主节点批量安装key
+
# 复制 同步文件
export server=172.16.100.62
@@ -70,6 +59,14 @@ scp /usr/local/bin/agent-wdd root@${server}:/usr/local/bin/agent-wdd
ssh root@${server} "/usr/local/bin/agent-wdd base ssh config"
ssh root@${server} "/usr/local/bin/agent-wdd base ssh key"
+
+# 安装docker-compose
+mv docker-compose-linux-x86_64 /usr/local/bin/docker-compose
+chmod +x /usr/local/bin/docker-compose
+
+# ssh root@${server} "/usr/local/bin/agent-wdd base tools"
+
+
# 磁盘初始化
ssh root@${server} "mkdir /root/wdd"
scp /root/wdd/disk.sh root@${server}:/root/wdd/
@@ -83,7 +80,7 @@ scp /root/wdd/docker-compose-v2.18.0-linux-amd64 root@${server}:/root/wdd/
ssh root@${server} "/usr/local/bin/agent-wdd info all"
ssh root@${server} "cat /usr/local/etc/wdd/agent-wdd-config.yaml"
-# ssh root@${server} "/usr/local/bin/agent-wdd base tools"
+
ssh root@${server} "/usr/local/bin/agent-wdd base swap"
ssh root@${server} "/usr/local/bin/agent-wdd base firewall"
@@ -102,4 +99,21 @@ ssh root@${server} "cat /etc/docker/daemon.json"
ssh root@${server} "systemctl restart docker"
ssh root@${server} "docker info"
-wget https://oss.demo.uavcmlc.com/cmlc-installation/tmp/nginx=1.27.0=2025-03-11=402.tar.gz && docker load < nginx=1.27.0=2025-03-11=402.tar.gz && docker run -it --rm harbor.cdcyy.com.cn/cmii/nginx:1.27.0
\ No newline at end of file
+wget https://oss.demo.uavcmlc.com/cmlc-installation/tmp/nginx=1.27.0=2025-03-11=402.tar.gz && docker load < nginx=1.27.0=2025-03-11=402.tar.gz && docker run -it --rm harbor.cdcyy.com.cn/cmii/nginx:1.27.0
+
+
+# 主节点执行 安装harbor仓库
+/usr/local/bin/agent-wdd base harbor install
+
+# 安装rke kubectl
+mv /root/wdd/rke_amd64 /usr/local/bin/rke
+chmod +x /usr/local/bin/rke
+
+mv /root/wdd/kubectl /usr/local/bin/kubectl
+chmod +x /usr/local/bin/kubectl
+
+
+# 安装 k8s-证书
+
+
+curl -s https://172.29.137.125
\ No newline at end of file
diff --git a/998-常用脚本/a-部署脚本/deploy-nfs-server.sh b/998-常用脚本/a-部署脚本/deploy-nfs-server.sh
index 7ea7e9a..2d22403 100644
--- a/998-常用脚本/a-部署脚本/deploy-nfs-server.sh
+++ b/998-常用脚本/a-部署脚本/deploy-nfs-server.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-#nfs_data_path="/var/lib/docker/nfs_data"
-nfs_data_path="/data/nfs_data"
+nfs_data_path="/var/lib/docker/nfs_data"
+#nfs_data_path="/data/nfs_data"
deploy_nfs_server(){
diff --git a/998-常用脚本/a-部署脚本/nginx暴露/真实nginx-reverse-proxy.conf b/998-常用脚本/a-部署脚本/nginx暴露/真实nginx-reverse-proxy.conf
index 7a2e590..79c7c63 100644
--- a/998-常用脚本/a-部署脚本/nginx暴露/真实nginx-reverse-proxy.conf
+++ b/998-常用脚本/a-部署脚本/nginx暴露/真实nginx-reverse-proxy.conf
@@ -1,8 +1,9 @@
upstream proxy_server {
ip_hash;
- server 172.16.100.55:30500;
- server 172.16.100.59:30500;
- server 172.16.100.60:30500;
+ server 192.168.0.2:30500;
+ server 192.168.0.4:30500;
+ server 192.168.0.5:30500;
+ server 192.168.0.6:30500;
}
server {
@@ -21,7 +22,7 @@ server {
proxy_buffering off;
proxy_buffer_size 4k;
proxy_buffers 4 12k;
- proxy_set_header Host fake-domain.eedsjc-uavms.io;
+ proxy_set_header Host fake-domain.xakny.io;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
diff --git a/998-常用脚本/a-部署脚本/x_minio初始化.sh b/998-常用脚本/a-部署脚本/x_minio初始化.sh
new file mode 100644
index 0000000..d6cbbf9
--- /dev/null
+++ b/998-常用脚本/a-部署脚本/x_minio初始化.sh
@@ -0,0 +1,50 @@
+export tenant_name=outside
+export inner_master_ip=Master节点的内网IP
+export minio_host_ip=MINIO的内网IP
+
+mc alias set ${tenant_name} http://${minio_host_ip}:9000 cmii B#923fC7mk
+
+
+mc mb ${tenant_name}/jadenq ${tenant_name}/tus ${tenant_name}/thumbnail ${tenant_name}/pub-cms ${tenant_name}/live-srs-hls/ ${tenant_name}/mission/ ${tenant_name}/surveillance ${tenant_name}/playback ${tenant_name}/tower ${tenant_name}/modelprocess ${tenant_name}/srs-hls ${tenant_name}/live-cluster-hls ${tenant_name}/geodata ${tenant_name}/ilm-detect ${tenant_name}/ilm-geodata
+echo ""
+
+echo "set rabbit mq"
+mc admin config set ${tenant_name} notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@${inner_master_ip}:35672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"
+echo ""
+
+echo "sleep 5 s!"
+sleep 5
+
+mc admin service restart ${tenant_name}
+
+echo "sleep 5 s!"
+sleep 5
+echo ""
+
+
+echo "start to add event notification !"
+
+mc event add ${tenant_name}/mission arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/modelprocess arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/live-srs-hls arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/playback arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/live-cluster-hls arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/geodata arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/surveillance arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/ilm-detect arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/ilm-geodata arn:minio:sqs::1:amqp --event put
+
+mc event add ${tenant_name}/tus arn:minio:sqs::1:amqp --event delete
+
+mc ilm add --expiry-days "1" ${tenant_name}/tus
+
+echo ""
+echo "done of init !"
\ No newline at end of file
diff --git a/998-常用脚本/a-部署脚本/z_执行apply命令.sh b/998-常用脚本/a-部署脚本/z_执行apply命令.sh
index 3b5335d..20b0f12 100644
--- a/998-常用脚本/a-部署脚本/z_执行apply命令.sh
+++ b/998-常用脚本/a-部署脚本/z_执行apply命令.sh
@@ -18,6 +18,9 @@ kubectl delete -f k8s-nfs-test.yaml
cd /var/lib/docker/nfs_data
+
+kubectl create ns xakny
+
kubectl apply -f k8s-pvc.yaml
kubectl delete -f k8s-pvc.yaml
@@ -37,13 +40,33 @@ kubectl delete -f k8s-redis.yaml
kubectl apply -f k8s-mysql.yaml
kubectl delete -f k8s-mysql.yaml
+
----
+ doris部署
+---
+kubectl apply -f doris-pvc.yaml
+kubectl apply -f doris-fe-configmap.yaml
+kubectl apply -f doris-be-configmap.yaml
+kubectl apply -f doris-be-internal-service.yaml
+kubectl apply -f doris-be-service.yaml
+kubectl apply -f doris-fe-internal-service.yaml
+kubectl apply -f doris-fe-service.yaml
+
+
+kubectl apply -f doris-fe-statusfulset.yaml
+kubectl delete -f doris-fe-statusfulset.yaml
+
+kubectl apply -f doris-be-statusfulset.yaml
+kubectl delete -f doris-be-statusfulset.yaml
+
+---
+数据库初始化
+---
+
kubectl apply -f k8s-nacos.yaml
kubectl delete -f k8s-nacos.yaml
----
-
vim k8s-configmap.yaml
kubectl apply -f k8s-configmap.yaml
kubectl delete -f k8s-configmap.yaml
diff --git a/998-常用脚本/a-部署脚本/手动创建harbor仓库.sh b/998-常用脚本/a-部署脚本/手动创建harbor仓库.sh
index fa74050..5b378cd 100644
--- a/998-常用脚本/a-部署脚本/手动创建harbor仓库.sh
+++ b/998-常用脚本/a-部署脚本/手动创建harbor仓库.sh
@@ -1,6 +1,6 @@
-export harbor_host=172.16.100.55:8033
+export harbor_host=192.168.0.2:8033
curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"cmii","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$harbor_host/api/v2.0/projects
diff --git a/998-常用脚本/a-部署脚本/编辑calico状态.sh b/998-常用脚本/a-部署脚本/编辑calico状态.sh
index 52398e4..9e7f06b 100644
--- a/998-常用脚本/a-部署脚本/编辑calico状态.sh
+++ b/998-常用脚本/a-部署脚本/编辑calico状态.sh
@@ -8,7 +8,7 @@ env:
value: "eth0"
# 更加保险
-kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=ens18
+kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=eth0
# 删除所有的calico pod
diff --git a/998-常用脚本/b-镜像同步/ImageSyncDLTU.sh b/998-常用脚本/b-镜像同步/ImageSyncDLTU.sh
index 901109a..c86ad38 100644
--- a/998-常用脚本/b-镜像同步/ImageSyncDLTU.sh
+++ b/998-常用脚本/b-镜像同步/ImageSyncDLTU.sh
@@ -5,7 +5,7 @@ gzip_image_list_txt="all-gzip-image-list.txt" # 一般不需要修改
oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
local_gzip_path="/root/octopus-image"
-DockerRegisterDomain="172.16.100.55:8033" # 需要根据实际修改
+DockerRegisterDomain="192.168.0.2:8033" # 需要根据实际修改
HarborAdminPass=V2ryStr@ngPss # 需要跟第一脚本中的密码保持一致
print_green() {
@@ -116,9 +116,9 @@ Load_Tag_Upload(){
shift # past argument
;;
cmii)
- local_gzip_path="$local_gzip_path/uavms-2.0"
+ local_gzip_path="$local_gzip_path/cmii"
mkdir -p $local_gzip_path
- oss_prefix_url="$oss_prefix_url/uavms-2.0/"
+ oss_prefix_url="$oss_prefix_url/cmii/"
ltu
shift # past argument
;;
@@ -163,6 +163,6 @@ test(){
}
# test
-#Download_Load_Tag_Upload "cmii"
+Download_Load_Tag_Upload "rke"
- Load_Tag_Upload "cmii"
\ No newline at end of file
+# Load_Tag_Upload "cmii"
\ No newline at end of file
diff --git a/998-常用脚本/故障恢复脚本/重启cmii的前端后端Pod.sh b/998-常用脚本/故障恢复脚本/重启cmii的前端后端Pod.sh
index 3b90efc..7afdb9e 100644
--- a/998-常用脚本/故障恢复脚本/重启cmii的前端后端Pod.sh
+++ b/998-常用脚本/故障恢复脚本/重启cmii的前端后端Pod.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
-name_space=szgz
+name_space=zjyd
delete_all_fronted_cmii_pod(){
diff --git a/998-常用脚本/磁盘脚本/0-挂载磁盘.sh b/998-常用脚本/磁盘脚本/0-挂载磁盘.sh
index 7ac1c83..4cd13c0 100644
--- a/998-常用脚本/磁盘脚本/0-挂载磁盘.sh
+++ b/998-常用脚本/磁盘脚本/0-挂载磁盘.sh
@@ -20,7 +20,7 @@
# ## #自动扩展XFS文件系统到最大的可用大小
-# xfs_growfs /dev/mapper/centos-root
+# xfs_growfs /dev/mapper/centos-root
# df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}'
@@ -72,9 +72,9 @@ echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"
-
+
# 扩容根目录,${VG_NAME}-root 通过df -Th获取需要扩容的文件系统
-# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
+# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
# xfs_growfs /dev/mapper/${VG_NAME}-root
# 自定义 安装lvm2'
diff --git a/998-常用脚本/磁盘脚本/1-高级-磁盘挂载.sh b/998-常用脚本/磁盘脚本/1-高级-磁盘挂载.sh
new file mode 100644
index 0000000..18eda45
--- /dev/null
+++ b/998-常用脚本/磁盘脚本/1-高级-磁盘挂载.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+set -e
+
+# 用户配置部分
+DISK="/dev/sdb" # 要操作的物理磁盘(请根据实际情况修改)
+MOUNT_PATH="/var/lib/docker" # 挂载点路径(目录会自动创建)
+FS_TYPE="ext4" # 文件系统类型(支持ext4/xfs,默认ext4)
+
+#----------------------------------------------------------
+# 核心逻辑(建议非必要不修改)
+#----------------------------------------------------------
+
+function check_prerequisites() {
+ # 必须root权限运行检查
+ [[ $EUID -ne 0 ]] && echo -e "\033[31m错误:必须使用root权限运行此脚本\033[0m" && exit 1
+
+ # 磁盘存在性检查
+ [[ ! -b "$DISK" ]] && echo -e "\033[31m错误:磁盘 $DISK 不存在\033[0m" && exit 1
+
+ # 文件系统类型校验
+ if [[ "$FS_TYPE" != "ext4" && "$FS_TYPE" != "xfs" ]]; then
+ echo -e "\033[31m错误:不支持的磁盘格式 $FS_TYPE,仅支持 ext4/xfs\033[0m"
+ exit 1
+ fi
+}
+
+function prepare_disk() {
+ local partition="${DISK}1"
+
+ echo -e "\033[34m正在初始化磁盘分区...\033[0m"
+ parted "$DISK" --script mklabel gpt
+ parted "$DISK" --script mkpart primary 0% 100%
+ parted "$DISK" --script set 1 lvm on
+ partprobe "$DISK" # 确保系统识别新分区表
+
+ echo -e "\033[34m正在创建LVM结构...\033[0m"
+ pvcreate "$partition"
+ vgcreate datavg "$partition"
+ lvcreate -y -l 100%FREE -n lvdata datavg
+}
+
+function format_and_mount() {
+ echo -e "\033[34m格式化逻辑卷...\033[0m"
+ if [[ "$FS_TYPE" == "ext4" ]]; then
+ mkfs.ext4 -F "/dev/datavg/lvdata"
+ else
+ mkfs.xfs -f "/dev/datavg/lvdata"
+ fi
+
+ echo -e "\033[34m设置挂载配置...\033[0m"
+ mkdir -p "$MOUNT_PATH"
+ UUID=$(blkid -s UUID -o value "/dev/datavg/lvdata")
+ echo "UUID=$UUID $MOUNT_PATH $FS_TYPE defaults 0 0" | tee -a /etc/fstab >/dev/null
+ mount -a
+}
+
+function verify_result() {
+ echo -e "\n\033[1;36m最终验证结果:\033[0m"
+ lsblk -f "$DISK"
+ echo -e "\n磁盘空间使用情况:"
+ df -hT "$MOUNT_PATH"
+}
+
+# 主执行流程
+check_prerequisites
+prepare_disk
+format_and_mount
+verify_result
+
+echo -e "\n\033[32m操作执行完毕,请仔细核查上述输出信息\033[0m"
+
+
+
+#请写一个shell脚本,脚本前面有变量可以设置 物理磁盘名称 挂载点路径 磁盘格式化的形式,脚本实现如下的功能
+#1.将物理磁盘的盘符修改为gpt格式
+#2.将物理磁盘全部空间创建一个分区,分区格式为lvm
+#3.将分区分配给逻辑卷datavg
+#4.将datavg所有可用的空间分配给逻辑卷lvdata
+#5.将逻辑卷格式化为变量磁盘格式化的形式(支持xfs和ext4的格式,默认为ext4)
+#6.创建变量挂载点路径
+#7.写入/etc/fstab,将逻辑卷挂载到变量挂载点,执行全部挂载操作
+#8.执行lsblk和df -TH查看分区是否正确挂载
+
+
diff --git a/999-数据库脚本/z_database_execute.sh b/999-数据库脚本/z_database_execute.sh
index d495665..9c7900e 100644
--- a/999-数据库脚本/z_database_execute.sh
+++ b/999-数据库脚本/z_database_execute.sh
@@ -4,15 +4,12 @@
# 修改目录的权限为
-
-export sql_file_folder_name=uavms
-chown 1001:1001 /var/lib/docker/mysql-pv/ynydapp/${sql_file_folder_name}/
-# 然后执行mysql的pod
-
-INSERT INTO `uav_lifecycle`.`regulator` (`id`, `name`, `is_system_admin`, `telephone`, `avatar_url`, `authentication_status`, `authentication_time`, `password`, `password_modify_time`, `is_frozen`, `is_del`, `create_at`, `create_by`, `update_at`, `update_by`) VALUES (1, '超级管理员', b'1', LOWER(HEX(AES_ENCRYPT('13800000000','TELEPHONE'))), NULL, 0, NULL, '$2a$10$zaAxaqvNzx8HdERMTrOF6u.InuKLSSi2VGQDBmYuEIG56ZqV6TwBu', NOW(), b'0', b'0', NOW(), 'r_1', NOW(), 'r_1');
-
-export sql_file_folder_name=uas-2.0
export local_mysql_host_path="/var/lib/docker/mysql-pv/$sql_file_folder_name"
+export sql_file_folder_name=2.1
+chown 1001:1001 /var/lib/docker/mysql-pv/xakny/${sql_file_folder_name}/
+
+# 然后执行mysql的pod
+export sql_file_folder_name=2.1
export sql_import_file_path="/bitnami/mysql/${sql_file_folder_name}"
for sql_file in $(ls "$sql_import_file_path" | sort -n -k1.1,1.2); do
echo "current file is ${sql_file}"
@@ -23,6 +20,12 @@ for sql_file in $(ls "$sql_import_file_path" | sort -n -k1.1,1.2); do
echo ""
done
+INSERT INTO `uav_lifecycle`.`regulator` (`id`, `name`, `is_system_admin`, `telephone`, `avatar_url`, `authentication_status`, `authentication_time`, `password`, `password_modify_time`, `is_frozen`, `is_del`, `create_at`, `create_by`, `update_at`, `update_by`) VALUES (1, '超级管理员', b'1', LOWER(HEX(AES_ENCRYPT('13800000000','TELEPHONE'))), NULL, 0, NULL, '$2a$10$zaAxaqvNzx8HdERMTrOF6u.InuKLSSi2VGQDBmYuEIG56ZqV6TwBu', NOW(), b'0', b'0', NOW(), 'r_1', NOW(), 'r_1');
+
+
+# doris初始化
+mysql -uroot -hdoris-cluster-fe-internal -P9030 < 1node_table_init.sql
+
# nacos备份
## 在pod里面执行
mysqldump -uroot -pQzfXQhd3bQ -h127.0.0.1 -P3306 -t --set-gtid-purged=OFF cmii_nacos_config config_info his_config_info roles users > ${sql_import_file_path}/cmii_nacos_config_wdd.sql
diff --git a/999-部署模板/kubectl b/999-部署模板/kubectl
deleted file mode 100644
index ad3c248..0000000
Binary files a/999-部署模板/kubectl and /dev/null differ
diff --git a/999-部署模板/rke b/999-部署模板/rke
deleted file mode 100644
index 1f709b7..0000000
Binary files a/999-部署模板/rke and /dev/null differ