diff --git a/agent-common/image/ImageNameConvert.go b/agent-common/image/ImageNameConvert.go index 778222e..26d0a53 100755 --- a/agent-common/image/ImageNameConvert.go +++ b/agent-common/image/ImageNameConvert.go @@ -20,16 +20,27 @@ func ImageFullNameToAppName(imageFullName string) (appName string) { // 10.1.1.1:8033/cmii/ok:1.2 不支持 不允许存在 // rancher/fleet:v0.3.4 - // ossr/srs:v5.0.1 ==> docker=cmii=srs=v5.0.1.tar.gz + // ossr/srs:v5.0.1 // nginx:latest // bitnami/minio:2022.5.4 // simonrupf/chronyd:0.4.3 - if strings.HasPrefix(imageFullName, CmiiHarborPrefix) { - return strings.Split(strings.TrimPrefix(imageFullName, CmiiHarborPrefix), ":")[0] + s := strings.Split(imageFullName, ":") + if len(s) == 1 { + // nginx + return imageFullName } + // 10.1.1.1:8033/cmii/ok:1.2 + // harbor.cdcyy.cn/cmii/cmii-uav-platform:5.4.0 ==> cmii-uav-platform + // rancher/fleet:v0.3.4 + // ossr/srs:v5.0.1 + // nginx:latest + // bitnami/minio:2022.5.4 + // simonrupf/chronyd:0.4.3 - return "" + middle := s[len(s)-2] + split := strings.Split(middle, "/") + return split[len(split)-1] } func ImageFullNameToImageTag(imageFullName string) (imageTag string) { @@ -43,6 +54,27 @@ func ImageFullNameToImageTag(imageFullName string) (imageTag string) { return s } +func ImageFullNameToImageNameAndTag(imageFullName string) (imageName, imageTag string) { + + s := strings.Split(imageFullName, ":") + if len(s) == 1 { + // nginx + return imageFullName, "latest" + } + // 10.1.1.1:8033/cmii/ok:1.2 + // harbor.cdcyy.cn/cmii/cmii-uav-platform:5.4.0 ==> cmii-uav-platform + // rancher/fleet:v0.3.4 + // ossr/srs:v5.0.1 + // nginx:latest + // bitnami/minio:2022.5.4 + // simonrupf/chronyd:0.4.3 + + middle := s[len(s)-2] + split := strings.Split(middle, "/") + return split[len(split)-1], s[len(s)-1] + +} + // ImageFullNameToGzipFileName 必须输出长度为4的内容 =出现得次数为3 func ImageFullNameToGzipFileName(imageFullName string) (gzipFileName string) { @@ -68,8 +100,11 @@ func ImageFullNameToGzipFileName(imageFullName string) 
(gzipFileName string) { if len(first) == 3 { // harbor.cdcyy.cn/cmii/cmii-uav-platform:5.4.0 // docker.io/ossr/srs:v5.0.1 + // docker.107421.xyz/rancher/calico-cni:v3.17.2 if strings.HasPrefix(split[0], CmiiHarborPrefix) { gzipFileName += "cmlc=cmii=" + } else if strings.Contains(split[0], "rancher") { + gzipFileName += "docker=rancher=" } else { gzipFileName += "docker=cmii=" } @@ -79,6 +114,7 @@ func ImageFullNameToGzipFileName(imageFullName string) (gzipFileName string) { } else if len(first) == 4 { // harbor.cdcyy.cn/cmii/ossr/srs:v5.0.1 // harbor.cdcyy.com.cn/cmii/cmlc-ai/cmlc-ai-operator:v5.2.0-t4-no-dino + if !strings.HasPrefix(split[0], CmiiHarborPrefix) { return imageFullName } @@ -145,7 +181,7 @@ func ImageNameToTargetImageFullName(imageName, targetHarborHost string) string { //// srs:v4.0.5 //// cmii-uav-platform:5.4.0 //s := targetHostFullName + "/cmii/" + imageFullName - log.InfoF("ImageFullName: [%s] to TargetImageFullName: [%s]", imageName, targetImageName) + //log.InfoF("ImageFullName: [%s] to TargetImageFullName: [%s]", imageName, targetImageName) return targetImageName } @@ -226,3 +262,35 @@ func GzipFolderPathToCmiiImageTagMaps(gzipFolderPath string) (frontendImageVersi return frontendImageVersionMap, backendImageVersionMap, srsImageVersionMap } + +func CmiiImageMapFromImageFullNameList(cmiiImageFullNameList []string) (cmiiImageVersionMap map[string]string) { + + cmiiImageVersionMap = make(map[string]string) + for _, imageFullName := range cmiiImageFullNameList { + imageName, imageTag := ImageFullNameToImageNameAndTag(imageFullName) + cmiiImageVersionMap[imageName] = imageTag + } + + return cmiiImageVersionMap +} + +func FrontendBackendSrsImageMapFromCmiiImageMap(cmiiImageVersionMap map[string]string) (frontendImageVersionMap, backendImageVersionMap, srsImageVersionMap map[string]string) { + + frontendImageVersionMap = make(map[string]string) + backendImageVersionMap = make(map[string]string) + srsImageVersionMap = make(map[string]string) + + for 
imageName, imageTag := range cmiiImageVersionMap { + if strings.Contains(imageName, "platform") { + frontendImageVersionMap[imageName] = imageTag + } else if strings.Contains(imageName, "srs") { + srsImageVersionMap[imageName] = imageTag + } else if strings.Contains(imageName, "operator") { + srsImageVersionMap[imageName] = imageTag + } else { + backendImageVersionMap[imageName] = imageTag + } + } + + return frontendImageVersionMap, backendImageVersionMap, srsImageVersionMap +} diff --git a/agent-common/image/ImageNameConvert_test.go b/agent-common/image/ImageNameConvert_test.go index 43e1b8d..1230e10 100755 --- a/agent-common/image/ImageNameConvert_test.go +++ b/agent-common/image/ImageNameConvert_test.go @@ -6,24 +6,29 @@ import ( "path/filepath" "strings" "testing" + "wdd.io/agent-common/real_project/zhejianyidong_erjipingtai" + "wdd.io/agent-common/real_project/zjjt" "wdd.io/agent-common/utils" ) -func TestImageFullNameToGzipFileName(t *testing.T) { - test := []string{ - "bitnami/redis:6.2.6-debian-10-r0", - "simonrupf/chronyd:0.4.3", - "harbor.cdcyy.com.cn/cmii/cmii-rtsp-operator:v4.1.0", - "harbor.cdcyy.com.cn/cmii/ossrs/srs:v4.0.136", - "ossrs/srs:v4.0.136", - "mongo:5.0", - "bitnami/minio:2023.5.4", - "busybox:latest", - "busybox", - "rancher/rancher:v2.7.0", - } +var imageFullNameList = []string{ + //"bitnami/redis:6.2.6-debian-10-r0", + //"simonrupf/chronyd:0.4.3", + //"harbor.cdcyy.com.cn/cmii/cmii-rtsp-operator:v4.1.0", + //"harbor.cdcyy.com.cn/cmii/ossrs/srs:v4.0.136", + //"ossrs/srs:v4.0.136", + //"mongo:5.0", + //"bitnami/minio:2023.5.4", + //"busybox:latest", + //"busybox", + //"rancher/rancher:v2.7.0", + //"10.1.1.1:8033/cmii/ok:1.2", + "docker.107421.xyz/rancher/shell:v0.1.6", +} - for _, s := range test { +func TestImageFullNameToGzipFileName(t *testing.T) { + + for _, s := range imageFullNameList { gzipFileName := ImageFullNameToGzipFileName(s) fmt.Println(gzipFileName) } @@ -244,3 +249,36 @@ func TestImageGzipFileNameToImageFullName(t 
*testing.T) { utils.BeautifulPrint(frontendMap) utils.BeautifulPrint(srsMap) } + +func TestFrontendBackendImageMapFromCmiiImageMap(t *testing.T) { + frontendImageVersionMap, backendImageVersionMap, _ := FrontendBackendSrsImageMapFromCmiiImageMap(zjjt.CmiiImageMap) + + utils.BeautifulPrint(frontendImageVersionMap) + utils.BeautifulPrint(backendImageVersionMap) +} + +func TestImageFullNameToImageNameAndTag(t *testing.T) { + + for _, s := range imageFullNameList { + imageName, imageTag := ImageFullNameToImageNameAndTag(s) + fmt.Printf("%-8s %-8s %-8s\n", imageName, imageTag, s) + + } +} + +func TestImageFullNameToAppName(t *testing.T) { + + for _, s := range imageFullNameList { + imageName := ImageFullNameToAppName(s) + fmt.Printf("%-8s %-8s\n", imageName, s) + + } +} + +func TestCmiiImageMapFromImageFullNameList(t *testing.T) { + imageList := zhejianyidong_erjipingtai.Cmii570ImageList + + cmiiImageVersionMap := CmiiImageMapFromImageFullNameList(imageList) + + utils.BeautifulPrint(cmiiImageVersionMap) +} diff --git a/agent-common/real_project/bgtg/Config.go b/agent-common/real_project/bgtg/Config.go new file mode 100755 index 0000000..4e0095e --- /dev/null +++ b/agent-common/real_project/bgtg/Config.go @@ -0,0 +1,140 @@ +package bgtg + +var AllCmiiImageTagList = []string{ + "cmii-uav-tower:5.4.0-0319", + "cmii-uav-platform-logistics:5.4.0", + "cmii-uav-platform-qinghaitourism:4.1.0-21377-0508", + "cmii-uav-platform-securityh5:5.4.0", + "cmii-uav-platform:5.4.0-25263-041102", + "cmii-uav-platform-ai-brain:5.4.0", + "cmii-uav-emergency:5.3.0", + "cmii-uav-kpi-monitor:5.4.0", + "cmii-uav-platform-splice:5.4.0-040301", + "cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427", + "cmii-live-operator:5.2.0", + "cmii-uav-gateway:5.4.0", + "cmii-uav-platform-security:4.1.6", + "cmii-uav-integration:5.4.0-25916", + "cmii-uav-notice:5.4.0", + "cmii-uav-platform-open:5.4.0", + "cmii-srs-oss-adaptor:2023-SA", + "cmii-admin-gateway:5.4.0", + "cmii-uav-process:5.4.0-0410", + 
"cmii-suav-supervision:5.4.0-032501", + "cmii-uav-platform-cms-portal:5.4.0", + "cmii-uav-platform-multiterminal:5.4.0", + "cmii-admin-data:5.4.0-0403", + "cmii-uav-cloud-live:5.4.0", + "cmii-uav-grid-datasource:5.2.0-24810", + "cmii-uav-platform-qingdao:4.1.6-24238-qingdao", + "cmii-admin-user:5.4.0", + "cmii-uav-industrial-portfolio:5.4.0-28027-041102", + "cmii-uav-alarm:5.4.0-0409", + "cmii-uav-clusters:5.2.0", + "cmii-uav-platform-oms:5.4.0", + "cmii-uav-platform-hljtt:5.3.0-hjltt", + "cmii-uav-platform-mws:5.4.0", + "cmii-uav-autowaypoint:4.1.6-cm", + "cmii-uav-grid-manage:5.1.0", + "cmii-uav-platform-share:5.4.0", + "cmii-uav-cms:5.3.0", + "cmii-uav-oauth:5.4.0-032901", + "cmii-open-gateway:5.4.0", + "cmii-uav-data-post-process:5.4.0", + "cmii-uav-multilink:5.4.0-032701", + "cmii-uav-platform-media:5.4.0", + "cmii-uav-platform-visualization:5.2.0", + "cmii-uav-platform-emergency-rescue:5.2.0", + "cmii-app-release:4.2.0-validation", + "cmii-uav-device:5.4.0-28028-0409", + "cmii-uav-gis-server:5.4.0", + "cmii-uav-brain:5.4.0", + "cmii-uav-depotautoreturn:5.4.0", + "cmii-uav-threedsimulation:5.1.0", + "cmii-uav-grid-engine:5.1.0", + "cmii-uav-developer:5.4.0-040701", + "cmii-uav-waypoint:5.4.0-032901", + "cmii-uav-platform-base:5.4.0", + "cmii-uav-platform-threedsimulation:5.2.0-21392", + "cmii-uav-platform-detection:5.4.0", + "cmii-uav-logger:5.4.0-0319", + "cmii-uav-platform-seniclive:5.2.0", + "cmii-suav-platform-supervisionh5:5.4.0", + "cmii-uav-user:5.4.0", + "cmii-uav-surveillance:5.4.0-28028-0409", + "cmii-uav-mission:5.4.0-28028-041006", + "cmii-uav-mqtthandler:5.4.0-25916-041001", + "cmii-uav-material-warehouse:5.4.0-0407", + "cmii-uav-platform-armypeople:5.4.0-041201", + "cmii-suav-platform-supervision:5.4.0", + "cmii-uav-airspace:5.4.0-0402", +} + +var AllCMiiImageFullNameList560 = []string{ + "harbor.cdcyy.com.cn/cmii/cmii-uav-integration:5.7.0-30015-29835-071601", + "harbor.cdcyy.com.cn/cmii/cmii-uav-mission:5.5.0-30015-061801", + 
"harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:5.6.0-0716", + "harbor.cdcyy.com.cn/cmii/cmii-admin-user:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:5.6.0-061202", + "harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:5.6.0-062401", + "harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:5.6.0-062602", + "harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-tower:5.6.0-062601", + "harbor.cdcyy.com.cn/cmii/cmii-uav-user:5.6.0-0704", + "harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-developer:5.6.0-0708", + "harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:5.6.0-0704", + "harbor.cdcyy.com.cn/cmii/cmii-uav-notice:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-process:5.6.0-060601", + "harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:5.6.0-30015-070801", + "harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-device:5.6.0-0715", + "harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:5.6.0-071601", + "harbor.cdcyy.com.cn/cmii/cmii-open-gateway:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:5.5.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-brain:5.5.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:5.5.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-cms:5.5.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:5.1.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:5.6.0-0704", + "harbor.cdcyy.com.cn/cmii/cmii-uav-logger:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-iam-gateway:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:5.2.0-24810", + "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:5.1.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:5.6.0-30067-071604", + "harbor.cdcyy.com.cn/cmii/cmii-admin-data:5.6.0", + 
"harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:5.6.0-0704", + "harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:5.5.0", + "harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:5.6.0-070401", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:5.6.0-0710", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-dispatchh5:5.6.0-0708", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:5.6.0-0704", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.6.0-29267-0717", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:5.4.0", + "harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:5.6.0-0708", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:5.6.0-0709", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:5.6.0-0709", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:4.1.0-21377-0508", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:5.6.0-28028-071102", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0", + "harbor.cdcyy.com.cn/cmii/ossrs/srs:v5.0.195", + "harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA", +} diff --git 
a/agent-common/real_project/bjtg/k8s-backend.yaml b/agent-common/real_project/bjtg/k8s-backend.yaml new file mode 100644 index 0000000..0838253 --- /dev/null +++ b/agent-common/real_project/bjtg/k8s-backend.yaml @@ -0,0 +1,6116 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-mission + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-mission + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-mission + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-mission + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-mission + image: 10.250.0.200:8033/cmii/cmii-uav-mission:5.5.0-30015-061801 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-mission + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + 
successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-mission + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-mission + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-mission + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-mission + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-integration + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-integration + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-integration + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-integration + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-integration + image: 10.250.0.200:8033/cmii/cmii-uav-integration:5.7.0-30015-29835-071601 + imagePullPolicy: Always + env: + - name: 
K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-integration + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-integration + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-integration + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-integration + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-integration + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 
+kind: Deployment +metadata: + name: cmii-uav-kpi-monitor + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-kpi-monitor + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-kpi-monitor + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-kpi-monitor + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-kpi-monitor + image: 10.250.0.200:8033/cmii/cmii-uav-kpi-monitor:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-kpi-monitor + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + 
httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-kpi-monitor + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-kpi-monitor + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-kpi-monitor + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-kpi-monitor + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-mqtthandler + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-mqtthandler + image: 10.250.0.200:8033/cmii/cmii-uav-mqtthandler:5.6.0-30067-071604 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-mqtthandler + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: 
NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-mqtthandler + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-mqtthandler + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-multilink + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-multilink + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + 
app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-multilink + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-multilink + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-multilink + image: 10.250.0.200:8033/cmii/cmii-uav-multilink:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-multilink + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: 
/cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-multilink + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-multilink + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-multilink + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-multilink + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-autowaypoint + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-autowaypoint + image: 10.250.0.200:8033/cmii/cmii-uav-autowaypoint:4.2.0-beta + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-autowaypoint + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: 
NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-autowaypoint + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-autowaypoint + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-grid-datasource + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-datasource + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-grid-datasource + template: + metadata: + labels: + cmii.type: backend + 
cmii.app: cmii-uav-grid-datasource + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-grid-datasource + image: 10.250.0.200:8033/cmii/cmii-uav-grid-datasource:5.2.0-24810 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-grid-datasource + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-grid-datasource + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service 
+metadata: + name: cmii-uav-grid-datasource + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-datasource + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-grid-datasource + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-sense-adapter + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-sense-adapter + image: 10.250.0.200:8033/cmii/cmii-uav-sense-adapter:5.6.0-0716 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-sense-adapter + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + 
memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-sense-adapter + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-sense-adapter + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-clusters + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-clusters + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-clusters + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-clusters + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + 
values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-clusters + image: 10.250.0.200:8033/cmii/cmii-uav-clusters:5.2.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-clusters + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-clusters + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-clusters + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-clusters + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + 
type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-clusters + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-depotautoreturn + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-depotautoreturn + image: 10.250.0.200:8033/cmii/cmii-uav-depotautoreturn:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-depotautoreturn + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + 
readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-depotautoreturn + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-depotautoreturn + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-logger + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-logger + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-logger + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-logger + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-logger + image: 10.250.0.200:8033/cmii/cmii-uav-logger:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: 
cmii-uav-logger + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-logger + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-logger + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-logger + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-logger + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-device + namespace: bjtg + labels: + 
cmii.type: backend + cmii.app: cmii-uav-device + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-device + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-device + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-device + image: 10.250.0.200:8033/cmii/cmii-uav-device:5.6.0-0715 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-device + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + 
periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-device + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-device + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-device + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-device + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-grid-engine + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-grid-engine + image: 10.250.0.200:8033/cmii/cmii-uav-grid-engine:5.1.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-grid-engine + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 
5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-grid-engine + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-grid-engine + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-admin-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-admin-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: 
cmii-admin-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-admin-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-admin-gateway + image: 10.250.0.200:8033/cmii/cmii-admin-gateway:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-admin-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-admin-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc 
+--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-admin-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-admin-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-admin-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-emergency + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-emergency + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-emergency + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-emergency + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-emergency + image: 10.250.0.200:8033/cmii/cmii-uav-emergency:5.6.0-0704 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-emergency + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + 
cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-emergency + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-emergency + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-emergency + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-emergency + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-admin-user + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-admin-user + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-admin-user + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-admin-user + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - 
name: harborsecret + containers: + - name: cmii-admin-user + image: 10.250.0.200:8033/cmii/cmii-admin-user:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-admin-user + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-admin-user + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-admin-user + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-admin-user + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + 
cmii.app: cmii-admin-user + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-waypoint + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-waypoint + image: 10.250.0.200:8033/cmii/cmii-uav-waypoint:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-waypoint + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 
60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-waypoint + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-waypoint + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-waypoint + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uas-lifecycle + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uas-lifecycle + image: 10.250.0.200:8033/cmii/cmii-uas-lifecycle:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uas-lifecycle + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - 
name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uas-lifecycle + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uas-lifecycle + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-admin-data + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-admin-data + octopus/control: backend-app-1.0.0 + 
app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-admin-data + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-admin-data + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-admin-data + image: 10.250.0.200:8033/cmii/cmii-admin-data:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-admin-data + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: 
nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-admin-data + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-admin-data + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-admin-data + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-admin-data + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-alarm + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-alarm + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-alarm + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-alarm + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-alarm + image: 10.250.0.200:8033/cmii/cmii-uav-alarm:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-alarm + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: 
"developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-alarm + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-alarm + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-alarm + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-alarm + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-material-warehouse + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + spec: + 
affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-material-warehouse + image: 10.250.0.200:8033/cmii/cmii-uav-material-warehouse:5.6.0-062602 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-material-warehouse + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-material-warehouse + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: 
cmii-uav-material-warehouse + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-surveillance + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-surveillance + image: 10.250.0.200:8033/cmii/cmii-uav-surveillance:5.6.0-30015-070801 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-surveillance + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi 
+ cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-surveillance + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-surveillance + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-surveillance + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-bridge + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-bridge + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-bridge + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-bridge + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + 
imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-bridge + image: 10.250.0.200:8033/cmii/cmii-uav-bridge:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-bridge + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-bridge + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-bridge + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-bridge + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + 
cmii.type: backend + cmii.app: cmii-uav-bridge + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-industrial-portfolio + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-industrial-portfolio + image: 10.250.0.200:8033/cmii/cmii-uav-industrial-portfolio:5.6.0-071701 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-industrial-portfolio + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + 
failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-industrial-portfolio + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-industrial-portfolio + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-cloud-live + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-cloud-live + image: 10.250.0.200:8033/cmii/cmii-uav-cloud-live:5.6.0 + imagePullPolicy: Always + env: + - 
name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-cloud-live + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-cloud-live + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-cloud-live + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 
+kind: Deployment +metadata: + name: cmii-uav-threedsimulation + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-threedsimulation + image: 10.250.0.200:8033/cmii/cmii-uav-threedsimulation:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-threedsimulation + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + 
failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-threedsimulation + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-threedsimulation + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-developer + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-developer + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-developer + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-developer + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-developer + image: 10.250.0.200:8033/cmii/cmii-uav-developer:5.6.0-0708 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-developer + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: 
"helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-developer + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-developer + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-developer + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-developer + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-iam-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-iam-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + 
app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-iam-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-iam-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-iam-gateway + image: 10.250.0.200:8033/cmii/cmii-iam-gateway:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-iam-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: 
/cmii/logs + readOnly: false + subPath: bjtg/cmii-iam-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-iam-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-iam-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-iam-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-grid-manage + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-grid-manage + image: 10.250.0.200:8033/cmii/cmii-uav-grid-manage:5.1.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-grid-manage + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + 
value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-grid-manage + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-grid-manage + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-tower + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-tower + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-tower + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-tower + spec: + affinity: + 
nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-tower + image: 10.250.0.200:8033/cmii/cmii-uav-tower:5.6.0-062601 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-tower + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-tower + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-tower + namespace: bjtg + labels: + cmii.type: backend + cmii.app: 
cmii-uav-tower + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-tower + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-cms + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-cms + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-cms + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-cms + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-cms + image: 10.250.0.200:8033/cmii/cmii-uav-cms:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-cms + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + 
periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-cms + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-cms + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-cms + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-cms + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-app-release + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-app-release + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-app-release + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-app-release + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-app-release + image: 10.250.0.200:8033/cmii/cmii-app-release:4.2.0-validation + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + 
- name: APPLICATION_NAME + value: cmii-app-release + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-app-release + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-app-release + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-app-release + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-app-release + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-notice 
+ namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-notice + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-notice + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-notice + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-notice + image: 10.250.0.200:8033/cmii/cmii-uav-notice:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-notice + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + 
timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-notice + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-notice + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-notice + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-notice + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-user + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-user + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-user + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-user + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-user + image: 10.250.0.200:8033/cmii/cmii-uav-user:5.6.0-0704 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-user + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: 
SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-user + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-user + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-user + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-user + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-oauth + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-oauth + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-oauth + template: + metadata: + labels: + 
cmii.type: backend + cmii.app: cmii-uav-oauth + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-oauth + image: 10.250.0.200:8033/cmii/cmii-uav-oauth:5.6.0-0704 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-oauth + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-oauth + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-oauth 
+ namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-oauth + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-oauth + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-brain + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-brain + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-brain + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-brain + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-brain + image: 10.250.0.200:8033/cmii/cmii-uav-brain:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-brain + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port 
+ scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-brain + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-brain + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-brain + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-brain + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-data-post-process + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-data-post-process + image: 
10.250.0.200:8033/cmii/cmii-uav-data-post-process:5.6.0-062401 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-data-post-process + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-data-post-process + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-data-post-process + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + 
cmii.app: cmii-uav-data-post-process + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uas-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uas-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uas-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uas-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uas-gateway + image: 10.250.0.200:8033/cmii/cmii-uas-gateway:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uas-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + 
initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uas-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uas-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uas-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uas-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-gis-server + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-gis-server + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-gis-server + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-gis-server + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-gis-server + image: 10.250.0.200:8033/cmii/cmii-uav-gis-server:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-gis-server + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m 
-Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-gis-server + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-gis-server + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-gis-server + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-gis-server + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-airspace + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-airspace + 
octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-airspace + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-airspace + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-airspace + image: 10.250.0.200:8033/cmii/cmii-uav-airspace:5.6.0-0704 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-airspace + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + 
failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-airspace + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-airspace + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-airspace + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-airspace + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-gateway + image: 10.250.0.200:8033/cmii/cmii-uav-gateway:5.6.0-061202 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - 
name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-process + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-process + template: + metadata: + labels: + cmii.type: backend + 
cmii.app: cmii-uav-process + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-process + image: 10.250.0.200:8033/cmii/cmii-uav-process:5.6.0-060601 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-process + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-process + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-process + 
namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-process + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-open-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-open-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-open-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-open-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-open-gateway + image: 10.250.0.200:8033/cmii/cmii-open-gateway:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-open-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: 
/cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-open-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-open-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-open-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-open-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-suav-supervision + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-suav-supervision + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-suav-supervision + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-suav-supervision + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - bjtg + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-suav-supervision + 
image: 10.250.0.200:8033/cmii/cmii-suav-supervision:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-suav-supervision + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-suav-supervision + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-suav-supervision + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-suav-supervision + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-suav-supervision + 
ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 diff --git a/agent-common/real_project/bjtg/k8s-configmap.yaml b/agent-common/real_project/bjtg/k8s-configmap.yaml new file mode 100644 index 0000000..ffcb739 --- /dev/null +++ b/agent-common/real_project/bjtg/k8s-configmap.yaml @@ -0,0 +1,420 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-media + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "media", + AppClientId: "APP_4AU8lbifESQO4FD6" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-splice + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "splice", + AppClientId: "APP_zE0M3sTRXrCIJS8Y" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-hljtt + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "hljtt", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-uasms + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "uasms", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-uas + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "uas", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-supervision + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + 
ApplicationShortName: "supervision", + AppClientId: "APP_qqSu82THfexI8PLM" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-detection + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "detection", + AppClientId: "APP_FDHW2VLVDWPnnOCy" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-ai-brain + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "ai-brain", + AppClientId: "APP_rafnuCAmBESIVYMH" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-armypeople + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "armypeople", + AppClientId: "APP_UIegse6Lfou9pO1U" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-logistics + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "logistics", + AppClientId: "APP_PvdfRRRBPL8xbIwl" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-threedsimulation + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "threedsimulation", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-supervisionh5 + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "supervisionh5", + AppClientId: "APP_qqSu82THfexI8PLM" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-open + namespace: 
bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "open", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-security + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "security", + AppClientId: "APP_JUSEMc7afyWXxvE7" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-qingdao + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "qingdao", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-share + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "share", + AppClientId: "APP_4lVSVI0ZGxTssir8" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-pangu + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-cmsportal + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "cmsportal", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-emergency + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "emergency", + AppClientId: "APP_aGsTAY1uMZrpKdfk" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 
tenant-prefix-multiterminal + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "multiterminal", + AppClientId: "APP_PvdfRRRBPL8xbIwl" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-mws + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "mws", + AppClientId: "APP_uKniXPELlRERBBwK" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-seniclive + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "seniclive", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-qinghaitourism + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "qinghaitourism", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-visualization + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "visualization", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-dispatchh5 + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "dispatchh5", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-base + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "base", + AppClientId: 
"APP_9LY41OaKSqk2btY0" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-oms + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "oms", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-securityh5 + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "securityh5", + AppClientId: "APP_N3ImO0Ubfu9peRHD" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-traffic + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "traffic", + AppClientId: "APP_Jc8i2wOQ1t73QEJS" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-jiangsuwenlv + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "bjtg", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "jiangsuwenlv", + AppClientId: "empty" + } diff --git a/agent-common/real_project/bjtg/k8s-dashboard.yaml b/agent-common/real_project/bjtg/k8s-dashboard.yaml new file mode 100644 index 0000000..1134919 --- /dev/null +++ b/agent-common/real_project/bjtg/k8s-dashboard.yaml @@ -0,0 +1,309 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + kubernetes.io/cluster-service: "true" + name: kubernetes-dashboard + namespace: kube-system +spec: + ports: + - port: 443 + targetPort: 8443 + nodePort: 30554 + selector: + k8s-app: kubernetes-dashboard + type: NodePort + +--- + 
+apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: kube-system +type: Opaque + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-csrf + namespace: kube-system +type: Opaque +data: + csrf: "" + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-key-holder + namespace: kube-system +type: Opaque + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-settings + namespace: kube-system + +--- + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +rules: + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. + - apiGroups: [ "" ] + resources: [ "secrets" ] + resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf" ] + verbs: [ "get", "update", "delete" ] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. + - apiGroups: [ "" ] + resources: [ "configmaps" ] + resourceNames: [ "kubernetes-dashboard-settings" ] + verbs: [ "get", "update" ] + # Allow Dashboard to get metrics. 
+ - apiGroups: [ "" ] + resources: [ "services" ] + resourceNames: [ "heapster", "dashboard-metrics-scraper" ] + verbs: [ "proxy" ] + - apiGroups: [ "" ] + resources: [ "services/proxy" ] + resourceNames: [ "heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper" ] + verbs: [ "get" ] + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: [ "metrics.k8s.io" ] + resources: [ "pods", "nodes" ] + verbs: [ "get", "list", "watch" ] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: kubernetes-dashboard + image: 10.250.0.200:8033/cmii/dashboard:v2.0.1 + ports: + - containerPort: 8443 + protocol: TCP + args: + - --auto-generate-certificates + - --namespace=kube-system + # Uncomment the following line to manually specify Kubernetes API server 
Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. + # - --apiserver-host=http://my-address:port + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: kubernetes-dashboard-certs + - name: tmp-volume + emptyDir: { } + serviceAccountName: kubernetes-dashboard + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kube-system +spec: + ports: + - port: 8000 + targetPort: 8000 + selector: + k8s-app: dashboard-metrics-scraper + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: dashboard-metrics-scraper + template: + metadata: + labels: + k8s-app: dashboard-metrics-scraper + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + spec: + containers: + - name: dashboard-metrics-scraper + image: 10.250.0.200:8033/cmii/metrics-scraper:v1.0.4 + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTP + path: / + port: 8000 + initialDelaySeconds: 30 + timeoutSeconds: 30 + volumeMounts: + - mountPath: /tmp + name: tmp-volume + securityContext: + allowPrivilegeEscalation: false + 
readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + serviceAccountName: kubernetes-dashboard + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + volumes: + - name: tmp-volume + emptyDir: { } +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: admin-user + namespace: kube-system + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: admin-user +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: admin-user + namespace: kube-system diff --git a/agent-common/real_project/bjtg/k8s-emqx.yaml b/agent-common/real_project/bjtg/k8s-emqx.yaml new file mode 100644 index 0000000..77ac63b --- /dev/null +++ b/agent-common/real_project/bjtg/k8s-emqx.yaml @@ -0,0 +1,274 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-emqxs + namespace: bjtg +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-emqxs-env + namespace: bjtg + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 +data: + EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443" + EMQX_NAME: "helm-emqxs" + EMQX_CLUSTER__DISCOVERY: "k8s" + EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs" + EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless" + EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns" + EMQX_CLUSTER__K8S__namespace: "bjtg" + EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local" + EMQX_ALLOW_ANONYMOUS: "false" + EMQX_ACL_NOMATCH: "deny" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-emqxs-cm + namespace: bjtg + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + 
app.kubernetes.io/version: 5.6.0 +data: + emqx_auth_mnesia.conf: |- + auth.mnesia.password_hash = sha256 + + # clientid 认证数据 + # auth.client.1.clientid = admin + # auth.client.1.password = 4YPk*DS%+5 + + ## username 认证数据 + auth.user.1.username = admin + auth.user.1.password = odD8#Ve7.B + auth.user.2.username = cmlc + auth.user.2.password = odD8#Ve7.B + + acl.conf: |- + {allow, {user, "admin"}, pubsub, ["admin/#"]}. + {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}. + {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}. + {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}. + {allow, all}. + + loaded_plugins: |- + {emqx_auth_mnesia,true}. + {emqx_auth_mnesia,true}. + {emqx_management, true}. + {emqx_recon, true}. + {emqx_retainer, false}. + {emqx_dashboard, true}. + {emqx_telemetry, true}. + {emqx_rule_engine, true}. + {emqx_bridge_mqtt, false}. +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-emqxs + namespace: bjtg + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 +spec: + replicas: 1 + serviceName: helm-emqxs-headless + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + template: + metadata: + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 + spec: + affinity: { } + imagePullSecrets: + - name: harborsecret + serviceAccountName: helm-emqxs + containers: + - name: helm-emqxs + image: 10.250.0.200:8033/cmii/emqx:4.4.9 + imagePullPolicy: Always + ports: + - name: mqtt + containerPort: 1883 + - name: mqttssl + containerPort: 8883 + - name: mgmt + containerPort: 8081 + - name: ws + containerPort: 8083 + - name: wss + containerPort: 8084 + - name: 
dashboard + containerPort: 18083 + - name: ekka + containerPort: 4370 + envFrom: + - configMapRef: + name: helm-emqxs-env + resources: { } + volumeMounts: + - name: emqx-data + mountPath: "/opt/emqx/data/mnesia" + readOnly: false + - name: helm-emqxs-cm + mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf" + subPath: emqx_auth_mnesia.conf + readOnly: false + # - name: helm-emqxs-cm + # mountPath: "/opt/emqx/etc/acl.conf" + # subPath: "acl.conf" + # readOnly: false + - name: helm-emqxs-cm + mountPath: "/opt/emqx/data/loaded_plugins" + subPath: loaded_plugins + readOnly: false + volumes: + - name: emqx-data + persistentVolumeClaim: + claimName: helm-emqxs + - name: helm-emqxs-cm + configMap: + name: helm-emqxs-cm + items: + - key: emqx_auth_mnesia.conf + path: emqx_auth_mnesia.conf + - key: acl.conf + path: acl.conf + - key: loaded_plugins + path: loaded_plugins +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-emqxs + namespace: bjtg +rules: + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - watch + - list +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-emqxs + namespace: bjtg +subjects: + - kind: ServiceAccount + name: helm-emqxs + namespace: bjtg +roleRef: + kind: Role + name: helm-emqxs + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-emqxs + namespace: bjtg + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 +spec: + type: NodePort + selector: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + ports: + - port: 1883 + name: mqtt + targetPort: 1883 + nodePort: 31883 + - port: 18083 + name: dashboard + targetPort: 18083 + nodePort: 38085 + - port: 8083 + name: mqtt-websocket + targetPort: 8083 + nodePort: 38083 +--- +apiVersion: v1 +kind: 
Service +metadata: + name: helm-emqxs-headless + namespace: bjtg + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + clusterIP: None + selector: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + ports: + - name: mqtt + port: 1883 + protocol: TCP + targetPort: 1883 + - name: mqttssl + port: 8883 + protocol: TCP + targetPort: 8883 + - name: mgmt + port: 8081 + protocol: TCP + targetPort: 8081 + - name: websocket + port: 8083 + protocol: TCP + targetPort: 8083 + - name: wss + port: 8084 + protocol: TCP + targetPort: 8084 + - name: dashboard + port: 18083 + protocol: TCP + targetPort: 18083 + - name: ekka + port: 4370 + protocol: TCP + targetPort: 4370 diff --git a/agent-common/real_project/bjtg/k8s-frontend.yaml b/agent-common/real_project/bjtg/k8s-frontend.yaml new file mode 100644 index 0000000..3428229 --- /dev/null +++ b/agent-common/real_project/bjtg/k8s-frontend.yaml @@ -0,0 +1,2606 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-cm + namespace: bjtg + labels: + cmii.type: frontend +data: + nginx.conf: | + server { + listen 9528; + server_name localhost; + gzip on; + + location / { + root /home/cmii-platform/dist; + index index.html index.htm; + } + + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root html; + } + } +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-open + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + spec: + 
imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-open + image: 10.250.0.200:8033/cmii/cmii-uav-platform-open:5.6.0-0704 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-open + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-open + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-open + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-emergency-rescue + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-emergency-rescue + image: 
10.250.0.200:8033/cmii/cmii-uav-platform-emergency-rescue:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-emergency-rescue + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-emergency + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-emergency-rescue + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-suav-platform-supervision + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-suav-platform-supervision + image: 10.250.0.200:8033/cmii/cmii-suav-platform-supervision:5.6.0-0708 + imagePullPolicy: Always + env: + - 
name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-suav-platform-supervision + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-supervision + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-suav-platform-supervision + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-oms + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-oms + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-oms + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-oms + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-oms + image: 10.250.0.200:8033/cmii/cmii-uav-platform-oms:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-oms + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: 
+ limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-oms + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-oms + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-oms + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-oms + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-dispatchh5 + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-dispatchh5 + image: 10.250.0.200:8033/cmii/cmii-uav-platform-dispatchh5:5.6.0-0708 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-dispatchh5 + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: 
nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-dispatchh5 + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-dispatchh5 + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-base + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-base + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-base + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-base + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-base + image: 10.250.0.200:8033/cmii/cmii-uav-platform-base:5.4.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-base + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + 
items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-base + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-base + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-base + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-base + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-cms-portal + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-cms-portal + image: 10.250.0.200:8033/cmii/cmii-uav-platform-cms-portal:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-cms-portal + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-cmsportal + items: + - key: ingress-config.js + path: 
ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-cms-portal + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-qinghaitourism + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qinghaitourism + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qinghaitourism + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qinghaitourism + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-qinghaitourism + image: 10.250.0.200:8033/cmii/cmii-uav-platform-qinghaitourism:4.1.0-21377-0508 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-qinghaitourism + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-qinghaitourism + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: 
cmii-uav-platform-qinghaitourism + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qinghaitourism + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-qinghaitourism + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-uasms + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-uasms + image: 10.250.0.200:8033/cmii/cmii-uav-platform-uasms:5.6.0-0709 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-uasms + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-uasms + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-uasms + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + octopus.control: frontend-app-wdd + 
app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-hljtt + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-hljtt + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-hljtt + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-hljtt + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-hljtt + image: 10.250.0.200:8033/cmii/cmii-uav-platform-hljtt:5.3.0-hjltt + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-hljtt + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-hljtt + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-hljtt + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-hljtt + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-hljtt + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + 
targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-qingdao + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qingdao + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qingdao + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qingdao + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-qingdao + image: 10.250.0.200:8033/cmii/cmii-uav-platform-qingdao:4.1.6-24238-qingdao + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-qingdao + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-qingdao + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-qingdao + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qingdao + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-qingdao + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: 
cmii-uav-platform + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform + image: 10.250.0.200:8033/cmii/cmii-uav-platform:5.6.0-29267-0717 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-pangu + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-visualization + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-visualization + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + 
cmii.app: cmii-uav-platform-visualization + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-visualization + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-visualization + image: 10.250.0.200:8033/cmii/cmii-uav-platform-visualization:5.2.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-visualization + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-visualization + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-visualization + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-visualization + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-visualization + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-mws + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + spec: 
+ imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-mws + image: 10.250.0.200:8033/cmii/cmii-uav-platform-mws:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-mws + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-mws + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-mws + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-share + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-share + image: 10.250.0.200:8033/cmii/cmii-uav-platform-share:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + 
value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-share + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-share + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-share + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-uas + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-uas + image: 10.250.0.200:8033/cmii/cmii-uav-platform-uas:5.6.0-0709 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-uas + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + 
cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-uas + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-uas + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-threedsimulation + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-threedsimulation + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-threedsimulation + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-threedsimulation + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-threedsimulation + image: 10.250.0.200:8033/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-threedsimulation + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: 
nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-threedsimulation + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-threedsimulation + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-threedsimulation + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-threedsimulation + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-media + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-media + image: 10.250.0.200:8033/cmii/cmii-uav-platform-media:5.6.0-0710 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-media + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: 
nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-media + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-media + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-detection + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-detection + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-detection + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-detection + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-detection + image: 10.250.0.200:8033/cmii/cmii-uav-platform-detection:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-detection + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-detection + items: + 
- key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-detection + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-detection + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-detection + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-securityh5 + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-securityh5 + image: 10.250.0.200:8033/cmii/cmii-uav-platform-securityh5:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-securityh5 + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-securityh5 + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-securityh5 + 
namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-armypeople + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-armypeople + image: 10.250.0.200:8033/cmii/cmii-uav-platform-armypeople:5.6.0-28028-071102 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-armypeople + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-armypeople + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-armypeople + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + octopus.control: frontend-app-wdd + 
app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-splice + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-splice + image: 10.250.0.200:8033/cmii/cmii-uav-platform-splice:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-splice + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-splice + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-splice + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + ports: + - name: web-svc-port + port: 9528 + 
protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-suav-platform-supervisionh5 + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervisionh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervisionh5 + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervisionh5 + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-suav-platform-supervisionh5 + image: 10.250.0.200:8033/cmii/cmii-suav-platform-supervisionh5:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-suav-platform-supervisionh5 + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-supervisionh5 + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-suav-platform-supervisionh5 + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervisionh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervisionh5 + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: 
cmii-uav-platform-security + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-security + image: 10.250.0.200:8033/cmii/cmii-uav-platform-security:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-security + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-security + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-security + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-seniclive + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-seniclive + octopus.control: frontend-app-wdd + 
app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-seniclive + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-seniclive + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-seniclive + image: 10.250.0.200:8033/cmii/cmii-uav-platform-seniclive:5.2.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-seniclive + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-seniclive + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-seniclive + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-seniclive + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-seniclive + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-jiangsuwenlv + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-jiangsuwenlv + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: 
frontend + cmii.app: cmii-uav-platform-jiangsuwenlv + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-jiangsuwenlv + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-jiangsuwenlv + image: 10.250.0.200:8033/cmii/cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-jiangsuwenlv + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-jiangsuwenlv + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-jiangsuwenlv + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-jiangsuwenlv + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-jiangsuwenlv + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-logistics + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + template: + metadata: + labels: + cmii.type: frontend + 
cmii.app: cmii-uav-platform-logistics + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-logistics + image: 10.250.0.200:8033/cmii/cmii-uav-platform-logistics:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-logistics + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-logistics + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-logistics + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-multiterminal + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: 
cmii-uav-platform-multiterminal + image: 10.250.0.200:8033/cmii/cmii-uav-platform-multiterminal:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-multiterminal + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-multiterminal + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-multiterminal + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-ai-brain + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-ai-brain + image: 10.250.0.200:8033/cmii/cmii-uav-platform-ai-brain:5.6.0 + imagePullPolicy: Always + env: + - 
name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-ai-brain + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-ai-brain + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-ai-brain + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 diff --git a/agent-common/real_project/bjtg/k8s-ingress.yaml b/agent-common/real_project/bjtg/k8s-ingress.yaml new file mode 100644 index 0000000..745a6e9 --- /dev/null +++ b/agent-common/real_project/bjtg/k8s-ingress.yaml @@ -0,0 +1,604 @@ +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: frontend-applications-ingress + namespace: bjtg + labels: + type: frontend + octopus.control: all-ingress-config-wdd + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite ^(/supervision)$ $1/ redirect; + rewrite ^(/supervisionh5)$ $1/ redirect; + rewrite ^(/pangu)$ $1/ redirect; + rewrite ^(/ai-brain)$ $1/ 
redirect; + rewrite ^(/armypeople)$ $1/ redirect; + rewrite ^(/base)$ $1/ redirect; + rewrite ^(/cmsportal)$ $1/ redirect; + rewrite ^(/detection)$ $1/ redirect; + rewrite ^(/dispatchh5)$ $1/ redirect; + rewrite ^(/emergency)$ $1/ redirect; + rewrite ^(/hljtt)$ $1/ redirect; + rewrite ^(/jiangsuwenlv)$ $1/ redirect; + rewrite ^(/logistics)$ $1/ redirect; + rewrite ^(/media)$ $1/ redirect; + rewrite ^(/multiterminal)$ $1/ redirect; + rewrite ^(/mws)$ $1/ redirect; + rewrite ^(/oms)$ $1/ redirect; + rewrite ^(/open)$ $1/ redirect; + rewrite ^(/qingdao)$ $1/ redirect; + rewrite ^(/qinghaitourism)$ $1/ redirect; + rewrite ^(/security)$ $1/ redirect; + rewrite ^(/securityh5)$ $1/ redirect; + rewrite ^(/seniclive)$ $1/ redirect; + rewrite ^(/share)$ $1/ redirect; + rewrite ^(/splice)$ $1/ redirect; + rewrite ^(/threedsimulation)$ $1/ redirect; + rewrite ^(/traffic)$ $1/ redirect; + rewrite ^(/uas)$ $1/ redirect; + rewrite ^(/uasms)$ $1/ redirect; + rewrite ^(/visualization)$ $1/ redirect; +spec: + rules: + - host: fake-domain.bjtg.io + http: + paths: + - path: /bjtg/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /bjtg/supervision/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-platform-supervision + servicePort: 9528 + - path: /bjtg/supervisionh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-platform-supervisionh5 + servicePort: 9528 + - path: /bjtg/pangu/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /bjtg/ai-brain/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-ai-brain + servicePort: 9528 + - path: /bjtg/armypeople/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-armypeople + servicePort: 9528 + - path: /bjtg/base/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-base + 
servicePort: 9528 + - path: /bjtg/cmsportal/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-cms-portal + servicePort: 9528 + - path: /bjtg/detection/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-detection + servicePort: 9528 + - path: /bjtg/dispatchh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-dispatchh5 + servicePort: 9528 + - path: /bjtg/emergency/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-emergency-rescue + servicePort: 9528 + - path: /bjtg/hljtt/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-hljtt + servicePort: 9528 + - path: /bjtg/jiangsuwenlv/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-jiangsuwenlv + servicePort: 9528 + - path: /bjtg/logistics/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-logistics + servicePort: 9528 + - path: /bjtg/media/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-media + servicePort: 9528 + - path: /bjtg/multiterminal/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-multiterminal + servicePort: 9528 + - path: /bjtg/mws/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-mws + servicePort: 9528 + - path: /bjtg/oms/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-oms + servicePort: 9528 + - path: /bjtg/open/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-open + servicePort: 9528 + - path: /bjtg/qingdao/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-qingdao + servicePort: 9528 + - path: /bjtg/qinghaitourism/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-qinghaitourism + servicePort: 9528 + - path: 
/bjtg/security/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-security + servicePort: 9528 + - path: /bjtg/securityh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-securityh5 + servicePort: 9528 + - path: /bjtg/seniclive/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-seniclive + servicePort: 9528 + - path: /bjtg/share/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-share + servicePort: 9528 + - path: /bjtg/splice/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-splice + servicePort: 9528 + - path: /bjtg/threedsimulation/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-threedsimulation + servicePort: 9528 + - path: /bjtg/traffic/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-traffic + servicePort: 9528 + - path: /bjtg/uas/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-uas + servicePort: 9528 + - path: /bjtg/uasms/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-uasms + servicePort: 9528 + - path: /bjtg/visualization/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-visualization + servicePort: 9528 +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: backend-applications-ingress + namespace: bjtg + labels: + type: backend + octopus.control: all-ingress-config-wdd + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" +spec: + rules: + - host: cmii-admin-data.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-data + servicePort: 8080 + - host: cmii-admin-gateway.uavcloud-bjtg.io + 
http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-gateway + servicePort: 8080 + - host: cmii-admin-user.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-user + servicePort: 8080 + - host: cmii-app-release.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-app-release + servicePort: 8080 + - host: cmii-open-gateway.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-open-gateway + servicePort: 8080 + - host: cmii-suav-supervision.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-supervision + servicePort: 8080 + - host: cmii-uas-gateway.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uas-gateway + servicePort: 8080 + - host: cmii-uas-lifecycle.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uas-lifecycle + servicePort: 8080 + - host: cmii-uav-airspace.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-airspace + servicePort: 8080 + - host: cmii-uav-alarm.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-alarm + servicePort: 8080 + - host: cmii-uav-autowaypoint.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-autowaypoint + servicePort: 8080 + - host: cmii-uav-brain.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-brain + servicePort: 8080 + - host: cmii-uav-bridge.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: 
cmii-uav-bridge + servicePort: 8080 + - host: cmii-uav-cloud-live.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-cloud-live + servicePort: 8080 + - host: cmii-uav-clusters.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-clusters + servicePort: 8080 + - host: cmii-uav-cms.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-cms + servicePort: 8080 + - host: cmii-uav-data-post-process.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-data-post-process + servicePort: 8080 + - host: cmii-uav-depotautoreturn.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-depotautoreturn + servicePort: 8080 + - host: cmii-uav-developer.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-developer + servicePort: 8080 + - host: cmii-uav-device.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-device + servicePort: 8080 + - host: cmii-uav-emergency.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-emergency + servicePort: 8080 + - host: cmii-uav-gateway.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gateway + servicePort: 8080 + - host: cmii-uav-gis-server.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gis-server + servicePort: 8080 + - host: cmii-uav-grid-datasource.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-grid-datasource + servicePort: 8080 + - host: 
cmii-uav-grid-engine.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-grid-engine + servicePort: 8080 + - host: cmii-uav-grid-manage.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-grid-manage + servicePort: 8080 + - host: cmii-uav-industrial-portfolio.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-industrial-portfolio + servicePort: 8080 + - host: cmii-uav-integration.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-integration + servicePort: 8080 + - host: cmii-uav-kpi-monitor.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-kpi-monitor + servicePort: 8080 + - host: cmii-uav-logger.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-logger + servicePort: 8080 + - host: cmii-uav-material-warehouse.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-material-warehouse + servicePort: 8080 + - host: cmii-uav-mission.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-mission + servicePort: 8080 + - host: cmii-uav-mqtthandler.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-mqtthandler + servicePort: 8080 + - host: cmii-uav-multilink.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-multilink + servicePort: 8080 + - host: cmii-uav-notice.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-notice + servicePort: 8080 + - host: 
cmii-uav-oauth.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-oauth + servicePort: 8080 + - host: cmii-uav-process.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-process + servicePort: 8080 + - host: cmii-uav-sense-adapter.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-sense-adapter + servicePort: 8080 + - host: cmii-uav-surveillance.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-surveillance + servicePort: 8080 + - host: cmii-uav-threedsimulation.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-threedsimulation + servicePort: 8080 + - host: cmii-uav-tower.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-tower + servicePort: 8080 + - host: cmii-uav-user.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-user + servicePort: 8080 + - host: cmii-uav-waypoint.uavcloud-bjtg.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-waypoint + servicePort: 8080 +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: all-gateways-ingress + namespace: bjtg + labels: + type: api-gateway + octopus.control: all-ingress-config-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; +spec: + rules: + - host: fake-domain.bjtg.io + 
http: + paths: + - path: /bjtg/oms/api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-gateway + servicePort: 8080 + - path: /bjtg/open/api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-open-gateway + servicePort: 8080 + - path: /bjtg/api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gateway + servicePort: 8080 diff --git a/agent-common/real_project/bjtg/k8s-mongo.yaml b/agent-common/real_project/bjtg/k8s-mongo.yaml new file mode 100644 index 0000000..32b6ee2 --- /dev/null +++ b/agent-common/real_project/bjtg/k8s-mongo.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mongo + namespace: bjtg + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 +spec: + type: NodePort + selector: + cmii.app: helm-mongo + cmii.type: middleware + ports: + - port: 27017 + name: server-27017 + targetPort: 27017 + nodePort: 37017 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-mongo + namespace: bjtg + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 +spec: + serviceName: helm-mongo + replicas: 1 + selector: + matchLabels: + cmii.app: helm-mongo + cmii.type: middleware + template: + metadata: + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 + annotations: + pod.alpha.kubernetes.io/initialized: "true" + spec: + imagePullSecrets: + - name: harborsecret + affinity: { } + containers: + - name: helm-mongo + image: 10.250.0.200:8033/cmii/mongo:5.0 + resources: { } + ports: + - containerPort: 27017 + name: mongo27017 + protocol: TCP + env: + - name: MONGO_INITDB_ROOT_USERNAME + value: cmlc + - name: 
MONGO_INITDB_ROOT_PASSWORD + value: REdPza8#oVlt + volumeMounts: + - name: mongo-data + mountPath: /data/db + readOnly: false + subPath: default/helm-mongo/data/db + volumes: + - name: mongo-data + persistentVolumeClaim: + claimName: helm-mongo +--- diff --git a/agent-common/real_project/bjtg/k8s-mysql.yaml b/agent-common/real_project/bjtg/k8s-mysql.yaml new file mode 100644 index 0000000..f311430 --- /dev/null +++ b/agent-common/real_project/bjtg/k8s-mysql.yaml @@ -0,0 +1,423 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-mysql + namespace: bjtg + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + annotations: { } +secrets: + - name: helm-mysql +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-mysql + namespace: bjtg + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus +type: Opaque +data: + mysql-root-password: "UXpmWFFoZDNiUQ==" + mysql-password: "S0F0cm5PckFKNw==" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-mysql + namespace: bjtg + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: primary +data: + my.cnf: |- + + [mysqld] + port=3306 + basedir=/opt/bitnami/mysql + datadir=/bitnami/mysql/data + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + socket=/opt/bitnami/mysql/tmp/mysql.sock + log-error=/bitnami/mysql/data/error.log + general_log_file = /bitnami/mysql/data/general.log + slow_query_log_file = /bitnami/mysql/data/slow.log + innodb_data_file_path = ibdata1:512M:autoextend + innodb_buffer_pool_size = 512M + innodb_buffer_pool_instances = 2 + innodb_log_file_size = 512M + innodb_log_files_in_group = 4 + innodb_log_files_in_group = 4 + log-bin = /bitnami/mysql/data/mysql-bin + 
max_binlog_size=1G + transaction_isolation = REPEATABLE-READ + default_storage_engine = innodb + character-set-server = utf8mb4 + collation-server=utf8mb4_bin + binlog_format = ROW + binlog_rows_query_log_events=on + binlog_cache_size=4M + binlog_expire_logs_seconds = 1296000 + max_binlog_cache_size=2G + gtid_mode = on + enforce_gtid_consistency = 1 + sync_binlog = 1 + innodb_flush_log_at_trx_commit = 1 + innodb_flush_method = O_DIRECT + log_slave_updates=1 + relay_log_recovery = 1 + relay-log-purge = 1 + default_time_zone = '+08:00' + lower_case_table_names=1 + log_bin_trust_function_creators=1 + group_concat_max_len=67108864 + innodb_io_capacity = 4000 + innodb_io_capacity_max = 8000 + innodb_flush_sync = 0 + innodb_flush_neighbors = 0 + innodb_write_io_threads = 8 + innodb_read_io_threads = 8 + innodb_purge_threads = 4 + innodb_page_cleaners = 4 + innodb_open_files = 65535 + innodb_max_dirty_pages_pct = 50 + innodb_lru_scan_depth = 4000 + innodb_checksum_algorithm = crc32 + innodb_lock_wait_timeout = 10 + innodb_rollback_on_timeout = 1 + innodb_print_all_deadlocks = 1 + innodb_file_per_table = 1 + innodb_online_alter_log_max_size = 4G + innodb_stats_on_metadata = 0 + innodb_thread_concurrency = 0 + innodb_sync_spin_loops = 100 + innodb_spin_wait_delay = 30 + lock_wait_timeout = 3600 + slow_query_log = 1 + long_query_time = 10 + log_queries_not_using_indexes =1 + log_throttle_queries_not_using_indexes = 60 + min_examined_row_limit = 100 + log_slow_admin_statements = 1 + log_slow_slave_statements = 1 + default_authentication_plugin=mysql_native_password + skip-name-resolve=1 + explicit_defaults_for_timestamp=1 + plugin_dir=/opt/bitnami/mysql/plugin + max_allowed_packet=128M + max_connections = 2000 + max_connect_errors = 1000000 + table_definition_cache=2000 + table_open_cache_instances=64 + tablespace_definition_cache=1024 + thread_cache_size=256 + interactive_timeout = 600 + wait_timeout = 600 + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=32M + 
bind-address=0.0.0.0 + performance_schema = 1 + performance_schema_instrument = '%memory%=on' + performance_schema_instrument = '%lock%=on' + innodb_monitor_enable=ALL + + [mysql] + no-auto-rehash + + [mysqldump] + quick + max_allowed_packet = 32M + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mysql/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-mysql-init-scripts + namespace: bjtg + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: primary +data: + create_users_grants_core.sql: |- + create + user zyly@'%' identified by 'Cmii@451315'; + grant select on *.* to zyly@'%'; + create + user zyly_qc@'%' identified by 'Uh)E_owCyb16'; + grant all + on *.* to zyly_qc@'%'; + create + user k8s_admin@'%' identified by 'fP#UaH6qQ3)8'; + grant all + on *.* to k8s_admin@'%'; + create + user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH'; + grant all + on *.* to audit_dba@'%'; + create + user db_backup@'%' identified by 'RU5Pu(4FGdT9'; + GRANT + SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT + on *.* to db_backup@'%'; + create + user monitor@'%' identified by 'PL3#nGtrWbf-'; + grant REPLICATION + CLIENT on *.* to monitor@'%'; + flush + privileges; +--- +kind: Service +apiVersion: v1 +metadata: + name: cmii-mysql + namespace: bjtg + labels: + app.kubernetes.io/component: primary + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: bjtg + cmii.app: mysql + cmii.type: middleware + octopus.control: mysql-db-wdd +spec: + ports: + - name: mysql + protocol: TCP + port: 13306 + targetPort: mysql + selector: + app.kubernetes.io/component: primary + app.kubernetes.io/name: mysql-db + 
app.kubernetes.io/release: bjtg + cmii.app: mysql + cmii.type: middleware + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mysql-headless + namespace: bjtg + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + annotations: { } +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mysql + port: 3306 + targetPort: mysql + selector: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: bjtg + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mysql + namespace: bjtg + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + annotations: { } +spec: + type: NodePort + ports: + - name: mysql + port: 3306 + protocol: TCP + targetPort: mysql + nodePort: 33306 + selector: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: bjtg + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-mysql + namespace: bjtg + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: bjtg + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + serviceName: helm-mysql + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/configuration: 
6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + spec: + imagePullSecrets: + - name: harborsecret + serviceAccountName: helm-mysql + affinity: { } + nodeSelector: + mysql-deploy: "true" + securityContext: + fsGroup: 1001 + initContainers: + - name: change-volume-permissions + image: 10.250.0.200:8033/cmii/bitnami-shell:11-debian-11-r136 + imagePullPolicy: "Always" + command: + - /bin/bash + - -ec + - | + chown -R 1001:1001 /bitnami/mysql + securityContext: + runAsUser: 0 + volumeMounts: + - name: mysql-data + mountPath: /bitnami/mysql + containers: + - name: mysql + image: 10.250.0.200:8033/cmii/mysql:8.1.0-debian-11-r42 + imagePullPolicy: "IfNotPresent" + securityContext: + runAsUser: 1001 + env: + - name: BITNAMI_DEBUG + value: "true" + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: helm-mysql + key: mysql-root-password + - name: MYSQL_DATABASE + value: "cmii" + ports: + - name: mysql + containerPort: 3306 + livenessProbe: + failureThreshold: 5 + initialDelaySeconds: 120 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + readinessProbe: + failureThreshold: 5 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + 
periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + resources: + limits: { } + requests: { } + volumeMounts: + - name: mysql-data + mountPath: /bitnami/mysql + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + - name: config + mountPath: /opt/bitnami/mysql/conf/my.cnf + subPath: my.cnf + volumes: + - name: config + configMap: + name: helm-mysql + - name: custom-init-scripts + configMap: + name: helm-mysql-init-scripts + - name: mysql-data + hostPath: + path: /var/lib/docker/mysql-pv/bjtg/ diff --git a/agent-common/real_project/bjtg/k8s-nacos.yaml b/agent-common/real_project/bjtg/k8s-nacos.yaml new file mode 100644 index 0000000..a80727b --- /dev/null +++ b/agent-common/real_project/bjtg/k8s-nacos.yaml @@ -0,0 +1,130 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-nacos-cm + namespace: bjtg + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 5.6.0 +data: + mysql.db.name: "cmii_nacos_config" + mysql.db.host: "helm-mysql" + mysql.port: "3306" + mysql.user: "k8s_admin" + mysql.password: "fP#UaH6qQ3)8" +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-nacos + namespace: bjtg + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 5.6.0 +spec: + type: NodePort + selector: + cmii.app: helm-nacos + cmii.type: middleware + ports: + - port: 8848 + name: server + targetPort: 8848 + nodePort: 38848 + - port: 9848 + name: server12 + targetPort: 9848 + - port: 9849 + name: server23 + targetPort: 9849 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-nacos + namespace: bjtg 
+ labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 5.6.0 +spec: + serviceName: helm-nacos + replicas: 1 + selector: + matchLabels: + cmii.app: helm-nacos + cmii.type: middleware + template: + metadata: + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/version: 5.6.0 + annotations: + pod.alpha.kubernetes.io/initialized: "true" + spec: + imagePullSecrets: + - name: harborsecret + affinity: { } + containers: + - name: nacos-server + image: 10.250.0.200:8033/cmii/nacos-server:v2.1.2 + ports: + - containerPort: 8848 + name: dashboard + - containerPort: 9848 + name: tcp-9848 + - containerPort: 9849 + name: tcp-9849 + env: + - name: NACOS_AUTH_ENABLE + value: "false" + - name: NACOS_REPLICAS + value: "1" + - name: MYSQL_SERVICE_DB_NAME + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.db.name + - name: MYSQL_SERVICE_PORT + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.port + - name: MYSQL_SERVICE_USER + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.user + - name: MYSQL_SERVICE_PASSWORD + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.password + - name: MYSQL_SERVICE_HOST + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.db.host + - name: NACOS_SERVER_PORT + value: "8848" + - name: NACOS_APPLICATION_PORT + value: "8848" + - name: PREFER_HOST_MODE + value: "hostname" + - name: MODE + value: standalone + - name: SPRING_DATASOURCE_PLATFORM + value: mysql +--- diff --git a/agent-common/real_project/bjtg/k8s-nfs-test.yaml b/agent-common/real_project/bjtg/k8s-nfs-test.yaml new file mode 100644 index 0000000..965765f --- /dev/null +++ b/agent-common/real_project/bjtg/k8s-nfs-test.yaml @@ -0,0 +1,38 @@ +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: test-claim + annotations: + 
volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" #与nfs-StorageClass.yaml metadata.name保持一致 +spec: + accessModes: + - ReadWriteOnce + storageClassName: nfs-prod-distribute + resources: + requests: + storage: 1Mi +--- +kind: Pod +apiVersion: v1 +metadata: + name: test-pod +spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: test-pod + image: 10.250.0.200:8033/cmii/busybox:latest + command: + - "/bin/sh" + args: + - "-c" + - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出 + volumeMounts: + - name: nfs-pvc + mountPath: "/mnt" + restartPolicy: "Never" + volumes: + - name: nfs-pvc + persistentVolumeClaim: + claimName: test-claim #与PVC名称保持一致 diff --git a/agent-common/real_project/bjtg/k8s-nfs.yaml b/agent-common/real_project/bjtg/k8s-nfs.yaml new file mode 100644 index 0000000..f65ac7e --- /dev/null +++ b/agent-common/real_project/bjtg/k8s-nfs.yaml @@ -0,0 +1,114 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system #根据实际环境设定namespace,下面类同 +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: nfs-client-provisioner-runner +rules: + - apiGroups: [ "" ] + resources: [ "persistentvolumes" ] + verbs: [ "get", "list", "watch", "create", "delete" ] + - apiGroups: [ "" ] + resources: [ "persistentvolumeclaims" ] + verbs: [ "get", "list", "watch", "update" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "storageclasses" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "create", "update", "patch" ] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: run-nfs-client-provisioner +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system +roleRef: + kind: ClusterRole + # name: 
nfs-client-provisioner-runner + name: cluster-admin # NOTE(review): binds cluster-admin instead of the scoped nfs-client-provisioner-runner role defined above — confirm this over-privilege is intended + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system +rules: + - apiGroups: [ "" ] + resources: [ "endpoints" ] + verbs: [ "get", "list", "watch", "create", "update", "patch" ] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system +roleRef: + kind: Role + name: leader-locking-nfs-client-provisioner + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: nfs-prod-distribute +provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment below +parameters: + archiveOnDelete: "false" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nfs-client-provisioner + labels: + app: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system # keep consistent with the namespace used by the RBAC objects above +spec: + replicas: 1 + selector: + matchLabels: + app: nfs-client-provisioner + strategy: + type: Recreate + template: + metadata: + labels: + app: nfs-client-provisioner + spec: + imagePullSecrets: + - name: harborsecret + serviceAccountName: nfs-client-provisioner + containers: + - name: nfs-client-provisioner + image: 10.250.0.200:8033/cmii/nfs-subdir-external-provisioner:v4.0.2 + volumeMounts: + - name: nfs-client-root + mountPath: /persistentvolumes + env: + - name: PROVISIONER_NAME + value: cmlc-nfs-storage + - name: NFS_SERVER + value: # TODO: set the NFS server address before deploying + - name: NFS_PATH + value: /var/lib/docker/nfs_data + volumes: + - name: nfs-client-root + nfs: + server: # TODO: set the NFS server address before deploying + path: /var/lib/docker/nfs_data diff --git a/agent-common/real_project/bjtg/k8s-pvc.yaml 
b/agent-common/real_project/bjtg/k8s-pvc.yaml new file mode 100644 index 0000000..3cfa2a2 --- /dev/null +++ b/agent-common/real_project/bjtg/k8s-pvc.yaml @@ -0,0 +1,76 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: nfs-backend-log-pvc + namespace: bjtg + labels: + cmii.type: middleware-base + cmii.app: nfs-backend-log-pvc + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.6.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 100Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-emqxs + namespace: bjtg + labels: + cmii.type: middleware-base + cmii.app: helm-emqxs + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.6.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 20Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-mongo + namespace: bjtg + labels: + cmii.type: middleware-base + cmii.app: helm-mongo + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.6.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 30Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-rabbitmq + namespace: bjtg + labels: + cmii.type: middleware-base + cmii.app: helm-rabbitmq + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.6.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 20Gi diff --git a/agent-common/real_project/bjtg/k8s-rabbitmq.yaml b/agent-common/real_project/bjtg/k8s-rabbitmq.yaml new file mode 100644 index 0000000..5f5f7f2 --- /dev/null +++ b/agent-common/real_project/bjtg/k8s-rabbitmq.yaml 
@@ -0,0 +1,328 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-rabbitmq + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +automountServiceAccountToken: true +secrets: + - name: helm-rabbitmq +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-rabbitmq + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +type: Opaque +data: + rabbitmq-password: "blljUk45MXIuX2hq" + rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-rabbitmq-config + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +data: + rabbitmq.conf: |- + ## Username and password + ## + default_user = admin + default_pass = nYcRN91r._hj + ## Clustering + ## + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.cluster.local + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + # queue master locator + queue_master_locator = min-masters + # enable guest user + loopback_users.guest = false + #default_vhost = default-vhost + #disk_free_limit.absolute = 50MB + #load_definitions = /app/load_definition.json +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-rabbitmq-endpoint-reader + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +rules: + - apiGroups: [ "" ] + resources: [ "endpoints" ] + verbs: [ "get" ] + - apiGroups: [ 
"" ] + resources: [ "events" ] + verbs: [ "create" ] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-rabbitmq-endpoint-reader + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +subjects: + - kind: ServiceAccount + name: helm-rabbitmq + namespace: bjtg +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: helm-rabbitmq-endpoint-reader +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-rabbitmq-headless + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +spec: + clusterIP: None + ports: + - name: epmd + port: 4369 + targetPort: epmd + - name: amqp + port: 5672 + targetPort: amqp + - name: dist + port: 25672 + targetPort: dist + - name: dashboard + port: 15672 + targetPort: dashboard + selector: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: bjtg + publishNotReadyAddresses: true +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-rabbitmq + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +spec: + type: NodePort + ports: + - name: amqp + port: 5672 + targetPort: amqp + nodePort: 35672 + - name: dashboard + port: 15672 + targetPort: dashboard + nodePort: 36675 + selector: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: bjtg +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-rabbitmq + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +spec: + serviceName: helm-rabbitmq-headless + podManagementPolicy: OrderedReady + replicas: 1 + updateStrategy: + type: RollingUpdate + 
selector: + matchLabels: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: bjtg + template: + metadata: + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq + annotations: + checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 + checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f + spec: + imagePullSecrets: + - name: harborsecret + serviceAccountName: helm-rabbitmq + affinity: { } + securityContext: + fsGroup: 5001 + runAsUser: 5001 + terminationGracePeriodSeconds: 120 + initContainers: + - name: volume-permissions + image: 10.250.0.200:8033/cmii/bitnami-shell:11-debian-11-r136 + imagePullPolicy: "Always" + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "/bitnami/rabbitmq/mnesia" + chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" + securityContext: + runAsUser: 0 + resources: + limits: { } + requests: { } + volumeMounts: + - name: data + mountPath: /bitnami/rabbitmq/mnesia + containers: + - name: rabbitmq + image: 10.250.0.200:8033/cmii/rabbitmq:3.9.12-debian-10-r3 + imagePullPolicy: "Always" + env: + - name: BITNAMI_DEBUG + value: "false" + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: "helm-rabbitmq-headless" + - name: K8S_ADDRESS_TYPE + value: hostname + - name: RABBITMQ_FORCE_BOOT + value: "no" + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" + - name: RABBITMQ_MNESIA_DIR + value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: "no" + - name: 
RABBITMQ_LOGS + value: "-" + - name: RABBITMQ_ULIMIT_NOFILES + value: "65536" + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: helm-rabbitmq + key: rabbitmq-erlang-cookie + - name: RABBITMQ_LOAD_DEFINITIONS + value: "no" + - name: RABBITMQ_SECURE_PASSWORD + value: "yes" + - name: RABBITMQ_USERNAME + value: "admin" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: helm-rabbitmq + key: rabbitmq-password + - name: RABBITMQ_PLUGINS + value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" + ports: + - name: amqp + containerPort: 5672 + - name: dist + containerPort: 25672 + - name: dashboard + containerPort: 15672 + - name: epmd + containerPort: 4369 + livenessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q ping + initialDelaySeconds: 120 + periodSeconds: 30 + timeoutSeconds: 20 + successThreshold: 1 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + initialDelaySeconds: 10 + periodSeconds: 30 + timeoutSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" + else + rabbitmqctl stop_app + fi + resources: + limits: { } + requests: { } + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: data + mountPath: /bitnami/rabbitmq/mnesia + volumes: + - name: configuration + configMap: + name: helm-rabbitmq-config + items: + - key: rabbitmq.conf + path: rabbitmq.conf + - name: data + persistentVolumeClaim: + claimName: helm-rabbitmq diff --git a/agent-common/real_project/bjtg/k8s-redis.yaml b/agent-common/real_project/bjtg/k8s-redis.yaml new file 
mode 100644 index 0000000..107484a --- /dev/null +++ b/agent-common/real_project/bjtg/k8s-redis.yaml @@ -0,0 +1,585 @@ +--- +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: helm-redis + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-redis + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus +type: Opaque +data: + redis-password: "TWNhY2hlQDQ1MjI=" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-configuration + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus +data: + redis.conf: |- + # User-supplied common configuration: + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. 
+ save "" + # End of common configuration + master.conf: |- + dir /data + # User-supplied master configuration: + rename-command FLUSHDB "" + rename-command FLUSHALL "" + # End of master configuration + replica.conf: |- + dir /data + slave-read-only yes + # User-supplied replica configuration: + rename-command FLUSHDB "" + rename-command FLUSHALL "" + # End of replica configuration +--- +# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-health + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus +data: + ping_readiness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash + + 
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status +--- +# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-scripts + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus +data: + start-master.sh: | + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + exec redis-server "${ARGS[@]}" + start-replica.sh: | + #!/bin/bash + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo 26379 + ;; + "REDIS") + echo 6379 + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + echo "${hostname}.${HEADLESS_SERVICE}" + } + + REDISPORT=$(get_port "$HOSTNAME" "REDIS") + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + + echo "" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + exec redis-server "${ARGS[@]}" +--- +# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: helm-redis-headless + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: bjtg +--- +# Source: outside-deploy/charts/redis-db/templates/master/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: helm-redis-master + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +spec: + type: ClusterIP + + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + nodePort: null + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: bjtg + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +--- +# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml +apiVersion: v1 +kind: Service 
+metadata: + name: helm-redis-replicas + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica +spec: + type: ClusterIP + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + nodePort: null + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: bjtg + app.kubernetes.io/component: replica +--- +# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-redis-master + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: bjtg + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master + serviceName: helm-redis-headless + updateStrategy: + rollingUpdate: { } + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master + annotations: + checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 + checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 + checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 + checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d + spec: + affinity: { } + securityContext: + fsGroup: 1001 + serviceAccountName: helm-redis + imagePullSecrets: + - name: harborsecret + terminationGracePeriodSeconds: 30 + containers: + - name: redis + image: 
10.250.0.200:8033/cmii/redis:6.2.6-debian-10-r0 + imagePullPolicy: "Always" + securityContext: + runAsUser: 1001 + command: + - /bin/bash + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: REDIS_REPLICATION_MODE + value: master + - name: ALLOW_EMPTY_PASSWORD + value: "no" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_TLS_ENABLED + value: "no" + - name: REDIS_PORT + value: "6379" + ports: + - name: redis + containerPort: 6379 + livenessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + # One second longer than command timeout should prevent generation of zombie processes. + timeoutSeconds: 6 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh 5 + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 2 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh 1 + resources: + limits: + cpu: "2" + memory: 8Gi + requests: + cpu: "100m" + memory: 1Gi + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + - name: redis-data + mountPath: /data + subPath: + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + - name: tmp + mountPath: /tmp + volumes: + - name: start-scripts + configMap: + name: helm-redis-scripts + defaultMode: 0755 + - name: health + configMap: + name: helm-redis-health + defaultMode: 0755 + - name: config + configMap: + name: helm-redis-configuration + - name: redis-tmp-conf + emptyDir: { } + - name: tmp + emptyDir: { } + - name: redis-data + emptyDir: { } +--- +# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-redis-replicas + namespace: bjtg + 
labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: bjtg + app.kubernetes.io/component: replica + serviceName: helm-redis-headless + updateStrategy: + rollingUpdate: { } + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica + annotations: + checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 + checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 + checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 + checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d + spec: + imagePullSecrets: + - name: harborsecret + securityContext: + fsGroup: 1001 + serviceAccountName: helm-redis + terminationGracePeriodSeconds: 30 + containers: + - name: redis + image: 10.250.0.200:8033/cmii/redis:6.2.6-debian-10-r0 + imagePullPolicy: "Always" + securityContext: + runAsUser: 1001 + command: + - /bin/bash + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-replica.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: helm-redis-master-0.helm-redis-headless.bjtg.svc.cluster.local + - name: REDIS_MASTER_PORT_NUMBER + value: "6379" + - name: ALLOW_EMPTY_PASSWORD + value: "no" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_TLS_ENABLED + value: "no" + - name: REDIS_PORT + value: "6379" + ports: + - name: 
redis + containerPort: 6379 + livenessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 6 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh 5 + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 2 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh 1 + resources: + limits: + cpu: "2" + memory: 8Gi + requests: + cpu: "100m" + memory: 1Gi + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + - name: redis-data + mountPath: /data + subPath: + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + volumes: + - name: start-scripts + configMap: + name: helm-redis-scripts + defaultMode: 0755 + - name: health + configMap: + name: helm-redis-health + defaultMode: 0755 + - name: config + configMap: + name: helm-redis-configuration + - name: redis-tmp-conf + emptyDir: { } + - name: redis-data + emptyDir: { } + diff --git a/agent-operator/deploy/z_bjtg/k8s-srs.yaml b/agent-common/real_project/bjtg/k8s-srs.yaml old mode 100755 new mode 100644 similarity index 92% rename from agent-operator/deploy/z_bjtg/k8s-srs.yaml rename to agent-common/real_project/bjtg/k8s-srs.yaml index 60a99df..6811efd --- a/agent-operator/deploy/z_bjtg/k8s-srs.yaml +++ b/agent-common/real_project/bjtg/k8s-srs.yaml @@ -83,7 +83,7 @@ data: hls_m3u8_file [app]/[stream].m3u8; hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts; hls_cleanup on; - hls_entry_prefix http://10.250.0.110:8888; + hls_entry_prefix http://10.250.0.200:8888; } } --- @@ -192,7 +192,6 @@ spec: srs-role: rtc template: metadata: - creationTimestamp: null labels: srs-role: rtc spec: @@ -209,7 +208,7 @@ spec: sizeLimit: 8Gi containers: - name: srs-rtc - image: 10.250.0.110:8033/cmii/srs:v5.0.195 + 
image: 10.250.0.200:8033/cmii/srs:v5.0.195 ports: - name: srs-rtmp containerPort: 30935 @@ -231,11 +230,11 @@ spec: protocol: UDP env: - name: CANDIDATE - value: 10.250.0.110 + value: 10.250.0.200 resources: limits: - cpu: 1200m - memory: 6Gi + cpu: 2000m + memory: 4Gi requests: cpu: 100m memory: 256Mi @@ -253,10 +252,10 @@ spec: terminationMessagePolicy: File imagePullPolicy: Always - name: oss-adaptor - image: 10.250.0.110:8033/cmii/cmii-srs-oss-adaptor:2023-SA + image: 10.250.0.200:8033/cmii/cmii-srs-oss-adaptor:2023-SA env: - name: OSS_ENDPOINT - value: 'http://10.250.0.110:9000' + value: 'http://10.250.0.200:9000' - name: OSS_AK value: cmii - name: OSS_SK @@ -281,7 +280,7 @@ spec: value: 'yes' resources: limits: - cpu: 1200m + cpu: 2000m memory: 4Gi requests: cpu: 100m @@ -330,7 +329,7 @@ spec: live-role: op-v2 template: metadata: - creationTimestamp: null labels: live-role: op-v2 spec: @@ -344,7 +342,7 @@ spec: defaultMode: 420 containers: - name: helm-live-op-v2 - image: 10.250.0.110:8033/cmii/cmii-live-operator:5.2.0 + image: 10.250.0.200:8033/cmii/cmii-live-operator:5.2.0 ports: - name: operator containerPort: 8080 @@ -362,7 +360,7 @@ spec: subPath: bootstrap.yaml livenessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: 8080 scheme: HTTP initialDelaySeconds: 60 @@ -372,7 +370,7 @@ spec: failureThreshold: 3 readinessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: 8080 scheme: HTTP initialDelaySeconds: 60 @@ -460,7 +458,7 @@ data: info: name: cmii-live-operator description: cmii-live-operator - version: 5.5.0 + version: 5.6.0 scanPackage: com.cmii.live.op cloud: nacos: @@ -470,11 +468,11 @@ data: server-addr: helm-nacos:8848 extension-configs: - data-id: cmii-live-operator.yml - group: 5.5.0 + group: 5.6.0 refresh: true shared-configs: - data-id: cmii-backend-system.yml - group: 5.5.0 + group: 5.6.0 refresh: true discovery: enabled: false @@ -483,17 +481,16 @@ data: engine: type: srs endpoint: 'http://helm-live-srs-svc:1985' - proto: - rtmp:
'rtmp://10.250.0.110:30935' - rtsp: 'rtsp://10.250.0.110:30554' - srt: 'srt://10.250.0.110:30556' - flv: 'http://10.250.0.110:30500' - hls: 'http://10.250.0.110:30500' - rtc: 'webrtc://10.250.0.110:30557' - replay: 'https://10.250.0.110:30333' + rtmp: 'rtmp://10.250.0.200:30935' + rtsp: 'rtsp://10.250.0.200:30554' + srt: 'srt://10.250.0.200:30556' + flv: 'http://10.250.0.200:30500' + hls: 'http://10.250.0.200:30500' + rtc: 'webrtc://10.250.0.200:30090' + replay: 'https://10.250.0.200:30333' minio: - endpoint: http://10.250.0.110:9000 + endpoint: http://10.250.0.200:9000 access-key: cmii secret-key: B#923fC7mk bucket: live-cluster-hls diff --git a/agent-common/real_project/bjtg/old/k8s-backend.yaml b/agent-common/real_project/bjtg/old/k8s-backend.yaml new file mode 100644 index 0000000..5276cd0 --- /dev/null +++ b/agent-common/real_project/bjtg/old/k8s-backend.yaml @@ -0,0 +1,6116 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-logger + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-logger + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-logger + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-logger + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-logger + image: 10.250.0.200:8033/cmii/cmii-uav-logger:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-logger + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP
+ valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-logger + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-logger + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-logger + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-logger + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-mqtthandler + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 
+ strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-mqtthandler + image: 10.250.0.200:8033/cmii/cmii-uav-mqtthandler:5.6.0-30067-071604 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-mqtthandler + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + 
subPath: bjtg/cmii-uav-mqtthandler + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-mqtthandler + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-multilink + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-multilink + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-multilink + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-multilink + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-multilink + image: 10.250.0.200:8033/cmii/cmii-uav-multilink:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-multilink + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: 
NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-multilink + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-multilink + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-multilink + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-multilink + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-industrial-portfolio + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + spec: + 
affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-industrial-portfolio + image: 10.250.0.200:8033/cmii/cmii-uav-industrial-portfolio:5.6.0-071701 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-industrial-portfolio + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-industrial-portfolio + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: 
cmii-uav-industrial-portfolio + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-notice + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-notice + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-notice + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-notice + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-notice + image: 10.250.0.200:8033/cmii/cmii-uav-notice:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-notice + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 
200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-notice + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-notice + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-notice + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-notice + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-iam-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-iam-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-iam-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-iam-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: 
cmii-iam-gateway + image: 10.250.0.200:8033/cmii/cmii-iam-gateway:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-iam-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-iam-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-iam-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-iam-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-iam-gateway + ports: + - 
name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-admin-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-admin-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-admin-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-admin-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-admin-gateway + image: 10.250.0.200:8033/cmii/cmii-admin-gateway:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-admin-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + 
periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-admin-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-admin-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-admin-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-admin-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-grid-engine + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-grid-engine + image: 10.250.0.200:8033/cmii/cmii-uav-grid-engine:5.1.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-grid-engine + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: 
NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-grid-engine + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-grid-engine + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-user + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-user + octopus/control: backend-app-1.0.0 + 
app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-user + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-user + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-user + image: 10.250.0.200:8033/cmii/cmii-uav-user:5.6.0-0704 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-user + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: 
nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-user + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-user + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-user + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-user + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-open-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-open-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-open-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-open-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-open-gateway + image: 10.250.0.200:8033/cmii/cmii-open-gateway:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-open-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + 
value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-open-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-open-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-open-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-open-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-admin-user + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-admin-user + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-admin-user + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-admin-user + spec: + affinity: + nodeAffinity: + 
requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-admin-user + image: 10.250.0.200:8033/cmii/cmii-admin-user:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-admin-user + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-admin-user + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-admin-user + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-admin-user + 
octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-admin-user + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-data-post-process + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-data-post-process + image: 10.250.0.200:8033/cmii/cmii-uav-data-post-process:5.6.0-062401 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-data-post-process + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + 
port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-data-post-process + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-data-post-process + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-clusters + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-clusters + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-clusters + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-clusters + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-clusters 
+ image: 10.250.0.200:8033/cmii/cmii-uav-clusters:5.2.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-clusters + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-clusters + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-clusters + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-clusters + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-clusters + ports: + - name: 
backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-process + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-process + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-process + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-process + image: 10.250.0.200:8033/cmii/cmii-uav-process:5.6.0-060601 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-process + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + 
successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-process + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-process + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-process + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-depotautoreturn + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-depotautoreturn + image: 10.250.0.200:8033/cmii/cmii-uav-depotautoreturn:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-depotautoreturn + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: 
NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-depotautoreturn + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-depotautoreturn + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-emergency + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-emergency + octopus/control: 
backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-emergency + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-emergency + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-emergency + image: 10.250.0.200:8033/cmii/cmii-uav-emergency:5.6.0-0704 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-emergency + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + 
failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-emergency + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-emergency + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-emergency + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-emergency + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uas-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uas-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uas-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uas-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uas-gateway + image: 10.250.0.200:8033/cmii/cmii-uas-gateway:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uas-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: 
IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uas-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uas-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uas-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uas-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-sense-adapter + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + template: + metadata: + labels: + cmii.type: 
backend + cmii.app: cmii-uav-sense-adapter + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-sense-adapter + image: 10.250.0.200:8033/cmii/cmii-uav-sense-adapter:5.6.0-0716 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-sense-adapter + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-sense-adapter + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service 
+metadata: + name: cmii-uav-sense-adapter + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-gis-server + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-gis-server + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-gis-server + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-gis-server + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-gis-server + image: 10.250.0.200:8033/cmii/cmii-uav-gis-server:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-gis-server + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: 
+ memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-gis-server + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-gis-server + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-gis-server + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-gis-server + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-admin-data + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-admin-data + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-admin-data + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-admin-data + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret 
+ containers: + - name: cmii-admin-data + image: 10.250.0.200:8033/cmii/cmii-admin-data:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-admin-data + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-admin-data + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-admin-data + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-admin-data + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: 
cmii-admin-data + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-cloud-live + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-cloud-live + image: 10.250.0.200:8033/cmii/cmii-uav-cloud-live:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-cloud-live + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + 
initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-cloud-live + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-cloud-live + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-autowaypoint + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-autowaypoint + image: 10.250.0.200:8033/cmii/cmii-uav-autowaypoint:4.2.0-beta + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-autowaypoint + - name: CUST_JAVA_OPTS + value: 
"-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-autowaypoint + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-autowaypoint + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-cms + namespace: bjtg + labels: + cmii.type: backend + cmii.app: 
cmii-uav-cms + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-cms + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-cms + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-cms + image: 10.250.0.200:8033/cmii/cmii-uav-cms:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-cms + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 
+ volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-cms + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-cms + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-cms + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-cms + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-developer + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-developer + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-developer + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-developer + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-developer + image: 10.250.0.200:8033/cmii/cmii-uav-developer:5.6.0-0708 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-developer + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 
5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-developer + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-developer + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-developer + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-developer + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-suav-supervision + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-suav-supervision + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-suav-supervision + template: + metadata: + labels: + cmii.type: backend + cmii.app: 
cmii-suav-supervision + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-suav-supervision + image: 10.250.0.200:8033/cmii/cmii-suav-supervision:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-suav-supervision + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-suav-supervision + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: 
cmii-suav-supervision + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-suav-supervision + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-suav-supervision + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-oauth + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-oauth + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-oauth + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-oauth + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-oauth + image: 10.250.0.200:8033/cmii/cmii-uav-oauth:5.6.0-0704 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-oauth + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + 
httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-oauth + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-oauth + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-oauth + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-oauth + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-tower + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-tower + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-tower + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-tower + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-tower + image: 
10.250.0.200:8033/cmii/cmii-uav-tower:5.6.0-062601 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-tower + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-tower + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-tower + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-tower + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-tower + ports: + - name: backend-tcp + port: 8080 
+ protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-mission + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-mission + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-mission + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-mission + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-mission + image: 10.250.0.200:8033/cmii/cmii-uav-mission:5.5.0-30015-061801 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-mission + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 
1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-mission + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-mission + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-mission + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-mission + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-kpi-monitor + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-kpi-monitor + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-kpi-monitor + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-kpi-monitor + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-kpi-monitor + image: 10.250.0.200:8033/cmii/cmii-uav-kpi-monitor:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-kpi-monitor + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - 
name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-kpi-monitor + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-kpi-monitor + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-kpi-monitor + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-kpi-monitor + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-airspace + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-airspace + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + 
app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-airspace + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-airspace + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-airspace + image: 10.250.0.200:8033/cmii/cmii-uav-airspace:5.6.0-0704 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-airspace + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: 
/cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-airspace + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-airspace + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-airspace + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-airspace + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-surveillance + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-surveillance + image: 10.250.0.200:8033/cmii/cmii-uav-surveillance:5.6.0-30015-070801 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-surveillance + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - 
name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-surveillance + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-surveillance + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-surveillance + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-grid-manage + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + template: + metadata: + labels: + cmii.type: backend + cmii.app: 
cmii-uav-grid-manage + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-grid-manage + image: 10.250.0.200:8033/cmii/cmii-uav-grid-manage:5.1.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-grid-manage + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-grid-manage + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-grid-manage 
+ namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-gateway + image: 10.250.0.200:8033/cmii/cmii-uav-gateway:5.6.0-061202 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + 
path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-gateway + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-integration + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-integration + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-integration + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-integration + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-integration + 
image: 10.250.0.200:8033/cmii/cmii-uav-integration:5.7.0-30015-29835-071601 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-integration + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-integration + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-integration + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-integration + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: 
cmii-uav-integration + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-grid-datasource + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-datasource + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-grid-datasource + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-datasource + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-grid-datasource + image: 10.250.0.200:8033/cmii/cmii-uav-grid-datasource:5.2.0-24810 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-grid-datasource + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + 
port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-grid-datasource + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-grid-datasource + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-datasource + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-grid-datasource + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-app-release + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-app-release + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-app-release + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-app-release + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-app-release + image: 10.250.0.200:8033/cmii/cmii-app-release:4.2.0-validation + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-app-release + - name: 
CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-app-release + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-app-release + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-app-release + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-app-release + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-material-warehouse + namespace: bjtg + labels: + cmii.type: backend 
+ cmii.app: cmii-uav-material-warehouse + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-material-warehouse + image: 10.250.0.200:8033/cmii/cmii-uav-material-warehouse:5.6.0-062602 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-material-warehouse + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP 
+ initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-material-warehouse + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-material-warehouse + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-brain + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-brain + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-brain + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-brain + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-brain + image: 10.250.0.200:8033/cmii/cmii-uav-brain:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-brain + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: 
NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-brain + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-brain + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-brain + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-brain + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-bridge + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-bridge + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + 
matchLabels: + cmii.type: backend + cmii.app: cmii-uav-bridge + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-bridge + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-bridge + image: 10.250.0.200:8033/cmii/cmii-uav-bridge:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-bridge + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-bridge + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + 
claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-bridge + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-bridge + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-bridge + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-waypoint + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-waypoint + image: 10.250.0.200:8033/cmii/cmii-uav-waypoint:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-waypoint + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + 
memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-waypoint + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-waypoint + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-waypoint + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-alarm + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-alarm + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-alarm + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-alarm + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: 
+ - name: harborsecret + containers: + - name: cmii-uav-alarm + image: 10.250.0.200:8033/cmii/cmii-uav-alarm:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-alarm + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-alarm + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-alarm + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-alarm + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + 
cmii.app: cmii-uav-alarm + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-device + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-device + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-device + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-device + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-device + image: 10.250.0.200:8033/cmii/cmii-uav-device:5.6.0-0715 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-device + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + 
timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-device + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-device + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-device + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-device + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-threedsimulation + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-threedsimulation + image: 10.250.0.200:8033/cmii/cmii-uav-threedsimulation:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-threedsimulation + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m 
-Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uav-threedsimulation + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-threedsimulation + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uas-lifecycle + namespace: bjtg + labels: + cmii.type: backend + cmii.app: 
cmii-uas-lifecycle + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uas-lifecycle + image: 10.250.0.200:8033/cmii/cmii-uas-lifecycle:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uas-lifecycle + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.6.0 + - name: SYS_CONFIG_GROUP + value: 5.6.0 + - name: IMAGE_VERSION + value: 5.6.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + 
successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: bjtg/cmii-uas-lifecycle + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uas-lifecycle + namespace: bjtg + labels: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 diff --git a/agent-common/real_project/bjtg/old/k8s-configmap.yaml b/agent-common/real_project/bjtg/old/k8s-configmap.yaml new file mode 100644 index 0000000..f46eaa6 --- /dev/null +++ b/agent-common/real_project/bjtg/old/k8s-configmap.yaml @@ -0,0 +1,420 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-cmsportal + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "cmsportal", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-open + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "open", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-splice + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "splice", + AppClientId: "APP_zE0M3sTRXrCIJS8Y" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-traffic + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + 
TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "traffic", + AppClientId: "APP_Jc8i2wOQ1t73QEJS" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-threedsimulation + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "threedsimulation", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-hljtt + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "hljtt", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-uas + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "uas", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-armypeople + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "armypeople", + AppClientId: "APP_UIegse6Lfou9pO1U" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-dispatchh5 + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "dispatchh5", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-ai-brain + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "ai-brain", + AppClientId: "APP_rafnuCAmBESIVYMH" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-media + namespace: bjtg +data: 
+ ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "media", + AppClientId: "APP_4AU8lbifESQO4FD6" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-jiangsuwenlv + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "jiangsuwenlv", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-pangu + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-multiterminal + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "multiterminal", + AppClientId: "APP_PvdfRRRBPL8xbIwl" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-base + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "base", + AppClientId: "APP_9LY41OaKSqk2btY0" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-security + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "security", + AppClientId: "APP_JUSEMc7afyWXxvE7" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-supervision + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "supervision", + AppClientId: "APP_qqSu82THfexI8PLM" + } +--- +kind: 
ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-qinghaitourism + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "qinghaitourism", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-qingdao + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "qingdao", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-mws + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "mws", + AppClientId: "APP_uKniXPELlRERBBwK" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-logistics + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "logistics", + AppClientId: "APP_PvdfRRRBPL8xbIwl" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-seniclive + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "seniclive", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-share + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "share", + AppClientId: "APP_4lVSVI0ZGxTssir8" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-detection + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + 
ApplicationShortName: "detection", + AppClientId: "APP_FDHW2VLVDWPnnOCy" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-uasms + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "uasms", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-supervisionh5 + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "supervisionh5", + AppClientId: "APP_qqSu82THfexI8PLM" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-oms + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "oms", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-securityh5 + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "securityh5", + AppClientId: "APP_N3ImO0Ubfu9peRHD" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-visualization + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "visualization", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-emergency + namespace: bjtg +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "demo", + CloudHOST: "10.250.0.200:8888", + ApplicationShortName: "emergency", + AppClientId: "APP_aGsTAY1uMZrpKdfk" + } diff --git a/agent-common/real_project/bjtg/old/k8s-dashboard.yaml b/agent-common/real_project/bjtg/old/k8s-dashboard.yaml new file mode 100644 
index 0000000..1134919 --- /dev/null +++ b/agent-common/real_project/bjtg/old/k8s-dashboard.yaml @@ -0,0 +1,309 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + kubernetes.io/cluster-service: "true" + name: kubernetes-dashboard + namespace: kube-system +spec: + ports: + - port: 443 + targetPort: 8443 + nodePort: 30554 + selector: + k8s-app: kubernetes-dashboard + type: NodePort + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: kube-system +type: Opaque + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-csrf + namespace: kube-system +type: Opaque +data: + csrf: "" + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-key-holder + namespace: kube-system +type: Opaque + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-settings + namespace: kube-system + +--- + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +rules: + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. + - apiGroups: [ "" ] + resources: [ "secrets" ] + resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf" ] + verbs: [ "get", "update", "delete" ] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. 
+ - apiGroups: [ "" ] + resources: [ "configmaps" ] + resourceNames: [ "kubernetes-dashboard-settings" ] + verbs: [ "get", "update" ] + # Allow Dashboard to get metrics. + - apiGroups: [ "" ] + resources: [ "services" ] + resourceNames: [ "heapster", "dashboard-metrics-scraper" ] + verbs: [ "proxy" ] + - apiGroups: [ "" ] + resources: [ "services/proxy" ] + resourceNames: [ "heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper" ] + verbs: [ "get" ] + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: [ "metrics.k8s.io" ] + resources: [ "pods", "nodes" ] + verbs: [ "get", "list", "watch" ] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: kubernetes-dashboard + image: 10.250.0.200:8033/cmii/dashboard:v2.0.1 + ports: + - 
containerPort: 8443 + protocol: TCP + args: + - --auto-generate-certificates + - --namespace=kube-system + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. + # - --apiserver-host=http://my-address:port + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: kubernetes-dashboard-certs + - name: tmp-volume + emptyDir: { } + serviceAccountName: kubernetes-dashboard + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kube-system +spec: + ports: + - port: 8000 + targetPort: 8000 + selector: + k8s-app: dashboard-metrics-scraper + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: dashboard-metrics-scraper + template: + metadata: + labels: + k8s-app: dashboard-metrics-scraper + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + spec: + containers: + - name: dashboard-metrics-scraper + image: 10.250.0.200:8033/cmii/metrics-scraper:v1.0.4 + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTP + path: 
/ + port: 8000 + initialDelaySeconds: 30 + timeoutSeconds: 30 + volumeMounts: + - mountPath: /tmp + name: tmp-volume + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + serviceAccountName: kubernetes-dashboard + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + volumes: + - name: tmp-volume + emptyDir: { } +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: admin-user + namespace: kube-system + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: admin-user +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: admin-user + namespace: kube-system diff --git a/agent-common/real_project/bjtg/old/k8s-emqx.yaml b/agent-common/real_project/bjtg/old/k8s-emqx.yaml new file mode 100644 index 0000000..77ac63b --- /dev/null +++ b/agent-common/real_project/bjtg/old/k8s-emqx.yaml @@ -0,0 +1,274 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-emqxs + namespace: bjtg +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-emqxs-env + namespace: bjtg + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 +data: + EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443" + EMQX_NAME: "helm-emqxs" + EMQX_CLUSTER__DISCOVERY: "k8s" + EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs" + EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless" + EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns" + EMQX_CLUSTER__K8S__namespace: "bjtg" + EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local" + EMQX_ALLOW_ANONYMOUS: "false" + EMQX_ACL_NOMATCH: "deny" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-emqxs-cm + 
namespace: bjtg + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 +data: + emqx_auth_mnesia.conf: |- + auth.mnesia.password_hash = sha256 + + # clientid 认证数据 + # auth.client.1.clientid = admin + # auth.client.1.password = 4YPk*DS%+5 + + ## username 认证数据 + auth.user.1.username = admin + auth.user.1.password = odD8#Ve7.B + auth.user.2.username = cmlc + auth.user.2.password = odD8#Ve7.B + + acl.conf: |- + {allow, {user, "admin"}, pubsub, ["admin/#"]}. + {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}. + {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}. + {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}. + {allow, all}. + + loaded_plugins: |- + {emqx_auth_mnesia,true}. + {emqx_auth_mnesia,true}. + {emqx_management, true}. + {emqx_recon, true}. + {emqx_retainer, false}. + {emqx_dashboard, true}. + {emqx_telemetry, true}. + {emqx_rule_engine, true}. + {emqx_bridge_mqtt, false}. 
+--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-emqxs + namespace: bjtg + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 +spec: + replicas: 1 + serviceName: helm-emqxs-headless + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + template: + metadata: + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 + spec: + affinity: { } + imagePullSecrets: + - name: harborsecret + serviceAccountName: helm-emqxs + containers: + - name: helm-emqxs + image: 10.250.0.200:8033/cmii/emqx:4.4.9 + imagePullPolicy: Always + ports: + - name: mqtt + containerPort: 1883 + - name: mqttssl + containerPort: 8883 + - name: mgmt + containerPort: 8081 + - name: ws + containerPort: 8083 + - name: wss + containerPort: 8084 + - name: dashboard + containerPort: 18083 + - name: ekka + containerPort: 4370 + envFrom: + - configMapRef: + name: helm-emqxs-env + resources: { } + volumeMounts: + - name: emqx-data + mountPath: "/opt/emqx/data/mnesia" + readOnly: false + - name: helm-emqxs-cm + mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf" + subPath: emqx_auth_mnesia.conf + readOnly: false + # - name: helm-emqxs-cm + # mountPath: "/opt/emqx/etc/acl.conf" + # subPath: "acl.conf" + # readOnly: false + - name: helm-emqxs-cm + mountPath: "/opt/emqx/data/loaded_plugins" + subPath: loaded_plugins + readOnly: false + volumes: + - name: emqx-data + persistentVolumeClaim: + claimName: helm-emqxs + - name: helm-emqxs-cm + configMap: + name: helm-emqxs-cm + items: + - key: emqx_auth_mnesia.conf + path: emqx_auth_mnesia.conf + - key: acl.conf + path: acl.conf + - key: loaded_plugins + path: 
loaded_plugins +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-emqxs + namespace: bjtg +rules: + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - watch + - list +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-emqxs + namespace: bjtg +subjects: + - kind: ServiceAccount + name: helm-emqxs + namespace: bjtg +roleRef: + kind: Role + name: helm-emqxs + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-emqxs + namespace: bjtg + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 +spec: + type: NodePort + selector: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + ports: + - port: 1883 + name: mqtt + targetPort: 1883 + nodePort: 31883 + - port: 18083 + name: dashboard + targetPort: 18083 + nodePort: 38085 + - port: 8083 + name: mqtt-websocket + targetPort: 8083 + nodePort: 38083 +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-emqxs-headless + namespace: bjtg + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + clusterIP: None + selector: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + ports: + - name: mqtt + port: 1883 + protocol: TCP + targetPort: 1883 + - name: mqttssl + port: 8883 + protocol: TCP + targetPort: 8883 + - name: mgmt + port: 8081 + protocol: TCP + targetPort: 8081 + - name: websocket + port: 8083 + protocol: TCP + targetPort: 8083 + - name: wss + port: 8084 + protocol: TCP + targetPort: 8084 + - name: dashboard + port: 18083 + protocol: TCP + targetPort: 18083 + - name: ekka + port: 4370 + protocol: TCP + targetPort: 
4370 diff --git a/agent-common/real_project/bjtg/old/k8s-frontend.yaml b/agent-common/real_project/bjtg/old/k8s-frontend.yaml new file mode 100644 index 0000000..0cc07b5 --- /dev/null +++ b/agent-common/real_project/bjtg/old/k8s-frontend.yaml @@ -0,0 +1,2606 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-cm + namespace: bjtg + labels: + cmii.type: frontend +data: + nginx.conf: | + server { + listen 9528; + server_name localhost; + gzip on; + + location / { + root /home/cmii-platform/dist; + index index.html index.htm; + } + + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root html; + } + } +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-seniclive + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-seniclive + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-seniclive + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-seniclive + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-seniclive + image: 10.250.0.200:8033/cmii/cmii-uav-platform-seniclive:5.2.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-seniclive + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-seniclive + 
items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-seniclive + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-seniclive + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-seniclive + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-suav-platform-supervision + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-suav-platform-supervision + image: 10.250.0.200:8033/cmii/cmii-suav-platform-supervision:5.6.0-0708 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-suav-platform-supervision + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-supervision + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: 
cmii-suav-platform-supervision + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-logistics + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-logistics + image: 10.250.0.200:8033/cmii/cmii-uav-platform-logistics:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-logistics + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-logistics + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-logistics + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + octopus.control: 
frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-open + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-open + image: 10.250.0.200:8033/cmii/cmii-uav-platform-open:5.6.0-0704 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-open + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-open + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-open + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + ports: + - name: web-svc-port + port: 9528 + 
protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-qinghaitourism + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qinghaitourism + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qinghaitourism + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qinghaitourism + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-qinghaitourism + image: 10.250.0.200:8033/cmii/cmii-uav-platform-qinghaitourism:4.1.0-21377-0508 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-qinghaitourism + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-qinghaitourism + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-qinghaitourism + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qinghaitourism + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-qinghaitourism + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: 
+ name: cmii-uav-platform-hljtt + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-hljtt + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-hljtt + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-hljtt + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-hljtt + image: 10.250.0.200:8033/cmii/cmii-uav-platform-hljtt:5.3.0-hjltt + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-hljtt + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-hljtt + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-hljtt + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-hljtt + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-hljtt + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + 
replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform + image: 10.250.0.200:8033/cmii/cmii-uav-platform:5.6.0-29267-0717 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-pangu + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-uasms + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + template: + metadata: + labels: + cmii.type: frontend + cmii.app: 
cmii-uav-platform-uasms + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-uasms + image: 10.250.0.200:8033/cmii/cmii-uav-platform-uasms:5.6.0-0709 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-uasms + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-uasms + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-uasms + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-securityh5 + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-securityh5 + image: 
10.250.0.200:8033/cmii/cmii-uav-platform-securityh5:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-securityh5 + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-securityh5 + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-securityh5 + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-splice + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-splice + image: 10.250.0.200:8033/cmii/cmii-uav-platform-splice:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: 
cmii-uav-platform-splice + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-splice + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-splice + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-dispatchh5 + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-dispatchh5 + image: 10.250.0.200:8033/cmii/cmii-uav-platform-dispatchh5:5.6.0-0708 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-dispatchh5 + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + 
requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-dispatchh5 + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-dispatchh5 + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-media + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-media + image: 10.250.0.200:8033/cmii/cmii-uav-platform-media:5.6.0-0710 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-media + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: 
ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-media + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-media + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-qingdao + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qingdao + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qingdao + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qingdao + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-qingdao + image: 10.250.0.200:8033/cmii/cmii-uav-platform-qingdao:4.1.6-24238-qingdao + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-qingdao + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + 
path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-qingdao + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-qingdao + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-qingdao + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-qingdao + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-suav-platform-supervisionh5 + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervisionh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervisionh5 + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervisionh5 + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-suav-platform-supervisionh5 + image: 10.250.0.200:8033/cmii/cmii-suav-platform-supervisionh5:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-suav-platform-supervisionh5 + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-supervisionh5 + items: + - key: ingress-config.js 
+ path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-suav-platform-supervisionh5 + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervisionh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervisionh5 + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-oms + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-oms + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-oms + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-oms + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-oms + image: 10.250.0.200:8033/cmii/cmii-uav-platform-oms:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-oms + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-oms + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-oms + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-oms 
+ octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-oms + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-visualization + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-visualization + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-visualization + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-visualization + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-visualization + image: 10.250.0.200:8033/cmii/cmii-uav-platform-visualization:5.2.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-visualization + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-visualization + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-visualization + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-visualization + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + 
cmii.type: frontend + cmii.app: cmii-uav-platform-visualization + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-ai-brain + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-ai-brain + image: 10.250.0.200:8033/cmii/cmii-uav-platform-ai-brain:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-ai-brain + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-ai-brain + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-ai-brain + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: 
apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-jiangsuwenlv + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-jiangsuwenlv + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-jiangsuwenlv + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-jiangsuwenlv + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-jiangsuwenlv + image: 10.250.0.200:8033/cmii/cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-jiangsuwenlv + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-jiangsuwenlv + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-jiangsuwenlv + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-jiangsuwenlv + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-jiangsuwenlv + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-multiterminal + namespace: bjtg + labels: + 
cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-multiterminal + image: 10.250.0.200:8033/cmii/cmii-uav-platform-multiterminal:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-multiterminal + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-multiterminal + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-multiterminal + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-emergency-rescue + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + octopus.control: frontend-app-wdd + 
app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-emergency-rescue + image: 10.250.0.200:8033/cmii/cmii-uav-platform-emergency-rescue:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-emergency-rescue + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-emergency + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-emergency-rescue + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-base + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-base + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 
+ selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-base + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-base + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-base + image: 10.250.0.200:8033/cmii/cmii-uav-platform-base:5.4.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-base + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-base + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-base + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-base + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-base + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-cms-portal + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + spec: + 
imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-cms-portal + image: 10.250.0.200:8033/cmii/cmii-uav-platform-cms-portal:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-cms-portal + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-cmsportal + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-cms-portal + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-detection + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-detection + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-detection + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-detection + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-detection + image: 
10.250.0.200:8033/cmii/cmii-uav-platform-detection:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-detection + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-detection + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-detection + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-detection + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-detection + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-mws + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-mws + image: 10.250.0.200:8033/cmii/cmii-uav-platform-mws:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-mws + 
ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-mws + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-mws + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-share + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-share + image: 10.250.0.200:8033/cmii/cmii-uav-platform-share:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-share + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: 
/etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-share + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-share + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-security + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-security + image: 10.250.0.200:8033/cmii/cmii-uav-platform-security:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-security + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - 
name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-security + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-security + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-threedsimulation + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-threedsimulation + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-threedsimulation + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-threedsimulation + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-threedsimulation + image: 10.250.0.200:8033/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-threedsimulation + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - 
name: tenant-prefix + configMap: + name: tenant-prefix-threedsimulation + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-threedsimulation + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-threedsimulation + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-threedsimulation + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-armypeople + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-armypeople + image: 10.250.0.200:8033/cmii/cmii-uav-platform-armypeople:5.6.0-28028-071102 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-armypeople + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-armypeople + items: + - key: ingress-config.js + 
path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-armypeople + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-uas + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.6.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-uas + image: 10.250.0.200:8033/cmii/cmii-uav-platform-uas:5.6.0-0709 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: bjtg + - name: APPLICATION_NAME + value: cmii-uav-platform-uas + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-uas + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-uas + namespace: bjtg + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + 
octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.6.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 diff --git a/agent-common/real_project/bjtg/old/k8s-ingress.yaml b/agent-common/real_project/bjtg/old/k8s-ingress.yaml new file mode 100644 index 0000000..668ef79 --- /dev/null +++ b/agent-common/real_project/bjtg/old/k8s-ingress.yaml @@ -0,0 +1,604 @@ +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: frontend-applications-ingress + namespace: bjtg + labels: + type: frontend + octopus.control: all-ingress-config-wdd + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite ^(/supervision)$ $1/ redirect; + rewrite ^(/supervisionh5)$ $1/ redirect; + rewrite ^(/pangu)$ $1/ redirect; + rewrite ^(/ai-brain)$ $1/ redirect; + rewrite ^(/armypeople)$ $1/ redirect; + rewrite ^(/base)$ $1/ redirect; + rewrite ^(/cmsportal)$ $1/ redirect; + rewrite ^(/detection)$ $1/ redirect; + rewrite ^(/dispatchh5)$ $1/ redirect; + rewrite ^(/emergency)$ $1/ redirect; + rewrite ^(/hljtt)$ $1/ redirect; + rewrite ^(/jiangsuwenlv)$ $1/ redirect; + rewrite ^(/logistics)$ $1/ redirect; + rewrite ^(/media)$ $1/ redirect; + rewrite ^(/multiterminal)$ $1/ redirect; + rewrite ^(/mws)$ $1/ redirect; + rewrite ^(/oms)$ $1/ redirect; + rewrite ^(/open)$ $1/ redirect; + rewrite ^(/qingdao)$ $1/ redirect; + rewrite ^(/qinghaitourism)$ $1/ redirect; + rewrite ^(/security)$ $1/ redirect; + rewrite ^(/securityh5)$ $1/ redirect; + rewrite ^(/seniclive)$ $1/ redirect; + rewrite ^(/share)$ $1/ redirect; + rewrite ^(/splice)$ $1/ redirect; + rewrite ^(/threedsimulation)$ $1/ redirect; + rewrite 
^(/traffic)$ $1/ redirect; + rewrite ^(/uas)$ $1/ redirect; + rewrite ^(/uasms)$ $1/ redirect; + rewrite ^(/visualization)$ $1/ redirect; +spec: + rules: + - host: fake-domain.bjtg.io + http: + paths: + - path: /demo/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /demo/supervision/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-platform-supervision + servicePort: 9528 + - path: /demo/supervisionh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-platform-supervisionh5 + servicePort: 9528 + - path: /demo/pangu/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /demo/ai-brain/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-ai-brain + servicePort: 9528 + - path: /demo/armypeople/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-armypeople + servicePort: 9528 + - path: /demo/base/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-base + servicePort: 9528 + - path: /demo/cmsportal/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-cms-portal + servicePort: 9528 + - path: /demo/detection/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-detection + servicePort: 9528 + - path: /demo/dispatchh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-dispatchh5 + servicePort: 9528 + - path: /demo/emergency/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-emergency-rescue + servicePort: 9528 + - path: /demo/hljtt/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-hljtt + servicePort: 9528 + - path: /demo/jiangsuwenlv/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-jiangsuwenlv + 
servicePort: 9528 + - path: /demo/logistics/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-logistics + servicePort: 9528 + - path: /demo/media/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-media + servicePort: 9528 + - path: /demo/multiterminal/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-multiterminal + servicePort: 9528 + - path: /demo/mws/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-mws + servicePort: 9528 + - path: /demo/oms/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-oms + servicePort: 9528 + - path: /demo/open/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-open + servicePort: 9528 + - path: /demo/qingdao/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-qingdao + servicePort: 9528 + - path: /demo/qinghaitourism/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-qinghaitourism + servicePort: 9528 + - path: /demo/security/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-security + servicePort: 9528 + - path: /demo/securityh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-securityh5 + servicePort: 9528 + - path: /demo/seniclive/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-seniclive + servicePort: 9528 + - path: /demo/share/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-share + servicePort: 9528 + - path: /demo/splice/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-splice + servicePort: 9528 + - path: /demo/threedsimulation/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-threedsimulation + servicePort: 9528 + - path: /demo/traffic/?(.*) + 
pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-traffic + servicePort: 9528 + - path: /demo/uas/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-uas + servicePort: 9528 + - path: /demo/uasms/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-uasms + servicePort: 9528 + - path: /demo/visualization/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-visualization + servicePort: 9528 +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: backend-applications-ingress + namespace: bjtg + labels: + type: backend + octopus.control: all-ingress-config-wdd + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" +spec: + rules: + - host: cmii-admin-data.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-data + servicePort: 8080 + - host: cmii-admin-gateway.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-gateway + servicePort: 8080 + - host: cmii-admin-user.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-user + servicePort: 8080 + - host: cmii-app-release.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-app-release + servicePort: 8080 + - host: cmii-open-gateway.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-open-gateway + servicePort: 8080 + - host: cmii-suav-supervision.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-supervision + servicePort: 8080 + - host: cmii-uas-gateway.uavcloud-demo.io + 
http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uas-gateway + servicePort: 8080 + - host: cmii-uas-lifecycle.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uas-lifecycle + servicePort: 8080 + - host: cmii-uav-airspace.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-airspace + servicePort: 8080 + - host: cmii-uav-alarm.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-alarm + servicePort: 8080 + - host: cmii-uav-autowaypoint.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-autowaypoint + servicePort: 8080 + - host: cmii-uav-brain.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-brain + servicePort: 8080 + - host: cmii-uav-bridge.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-bridge + servicePort: 8080 + - host: cmii-uav-cloud-live.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-cloud-live + servicePort: 8080 + - host: cmii-uav-clusters.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-clusters + servicePort: 8080 + - host: cmii-uav-cms.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-cms + servicePort: 8080 + - host: cmii-uav-data-post-process.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-data-post-process + servicePort: 8080 + - host: cmii-uav-depotautoreturn.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + 
serviceName: cmii-uav-depotautoreturn + servicePort: 8080 + - host: cmii-uav-developer.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-developer + servicePort: 8080 + - host: cmii-uav-device.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-device + servicePort: 8080 + - host: cmii-uav-emergency.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-emergency + servicePort: 8080 + - host: cmii-uav-gateway.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gateway + servicePort: 8080 + - host: cmii-uav-gis-server.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gis-server + servicePort: 8080 + - host: cmii-uav-grid-datasource.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-grid-datasource + servicePort: 8080 + - host: cmii-uav-grid-engine.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-grid-engine + servicePort: 8080 + - host: cmii-uav-grid-manage.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-grid-manage + servicePort: 8080 + - host: cmii-uav-industrial-portfolio.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-industrial-portfolio + servicePort: 8080 + - host: cmii-uav-integration.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-integration + servicePort: 8080 + - host: cmii-uav-kpi-monitor.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: 
cmii-uav-kpi-monitor + servicePort: 8080 + - host: cmii-uav-logger.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-logger + servicePort: 8080 + - host: cmii-uav-material-warehouse.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-material-warehouse + servicePort: 8080 + - host: cmii-uav-mission.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-mission + servicePort: 8080 + - host: cmii-uav-mqtthandler.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-mqtthandler + servicePort: 8080 + - host: cmii-uav-multilink.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-multilink + servicePort: 8080 + - host: cmii-uav-notice.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-notice + servicePort: 8080 + - host: cmii-uav-oauth.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-oauth + servicePort: 8080 + - host: cmii-uav-process.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-process + servicePort: 8080 + - host: cmii-uav-sense-adapter.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-sense-adapter + servicePort: 8080 + - host: cmii-uav-surveillance.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-surveillance + servicePort: 8080 + - host: cmii-uav-threedsimulation.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-threedsimulation + servicePort: 8080 + - host: 
cmii-uav-tower.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-tower + servicePort: 8080 + - host: cmii-uav-user.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-user + servicePort: 8080 + - host: cmii-uav-waypoint.uavcloud-demo.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-waypoint + servicePort: 8080 +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: all-gateways-ingress + namespace: bjtg + labels: + type: api-gateway + octopus.control: all-ingress-config-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; +spec: + rules: + - host: fake-domain.bjtg.io + http: + paths: + - path: /demo/oms/api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-gateway + servicePort: 8080 + - path: /demo/open/api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-open-gateway + servicePort: 8080 + - path: /demo/api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gateway + servicePort: 8080 diff --git a/agent-common/real_project/bjtg/old/k8s-mongo.yaml b/agent-common/real_project/bjtg/old/k8s-mongo.yaml new file mode 100644 index 0000000..32b6ee2 --- /dev/null +++ b/agent-common/real_project/bjtg/old/k8s-mongo.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mongo + namespace: bjtg + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + 
app.kubernetes.io/version: 5.6.0 +spec: + type: NodePort + selector: + cmii.app: helm-mongo + cmii.type: middleware + ports: + - port: 27017 + name: server-27017 + targetPort: 27017 + nodePort: 37017 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-mongo + namespace: bjtg + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 +spec: + serviceName: helm-mongo + replicas: 1 + selector: + matchLabels: + cmii.app: helm-mongo + cmii.type: middleware + template: + metadata: + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.6.0 + annotations: + pod.alpha.kubernetes.io/initialized: "true" + spec: + imagePullSecrets: + - name: harborsecret + affinity: { } + containers: + - name: helm-mongo + image: 10.250.0.200:8033/cmii/mongo:5.0 + resources: { } + ports: + - containerPort: 27017 + name: mongo27017 + protocol: TCP + env: + - name: MONGO_INITDB_ROOT_USERNAME + value: cmlc + - name: MONGO_INITDB_ROOT_PASSWORD + value: REdPza8#oVlt + volumeMounts: + - name: mongo-data + mountPath: /data/db + readOnly: false + subPath: default/helm-mongo/data/db + volumes: + - name: mongo-data + persistentVolumeClaim: + claimName: helm-mongo +--- diff --git a/agent-common/real_project/bjtg/old/k8s-mysql.yaml b/agent-common/real_project/bjtg/old/k8s-mysql.yaml new file mode 100644 index 0000000..f311430 --- /dev/null +++ b/agent-common/real_project/bjtg/old/k8s-mysql.yaml @@ -0,0 +1,423 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-mysql + namespace: bjtg + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + annotations: { } +secrets: + - name: helm-mysql +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-mysql + 
namespace: bjtg + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus +type: Opaque +data: + mysql-root-password: "UXpmWFFoZDNiUQ==" + mysql-password: "S0F0cm5PckFKNw==" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-mysql + namespace: bjtg + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: primary +data: + my.cnf: |- + + [mysqld] + port=3306 + basedir=/opt/bitnami/mysql + datadir=/bitnami/mysql/data + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + socket=/opt/bitnami/mysql/tmp/mysql.sock + log-error=/bitnami/mysql/data/error.log + general_log_file = /bitnami/mysql/data/general.log + slow_query_log_file = /bitnami/mysql/data/slow.log + innodb_data_file_path = ibdata1:512M:autoextend + innodb_buffer_pool_size = 512M + innodb_buffer_pool_instances = 2 + innodb_log_file_size = 512M + innodb_log_files_in_group = 4 + innodb_log_files_in_group = 4 + log-bin = /bitnami/mysql/data/mysql-bin + max_binlog_size=1G + transaction_isolation = REPEATABLE-READ + default_storage_engine = innodb + character-set-server = utf8mb4 + collation-server=utf8mb4_bin + binlog_format = ROW + binlog_rows_query_log_events=on + binlog_cache_size=4M + binlog_expire_logs_seconds = 1296000 + max_binlog_cache_size=2G + gtid_mode = on + enforce_gtid_consistency = 1 + sync_binlog = 1 + innodb_flush_log_at_trx_commit = 1 + innodb_flush_method = O_DIRECT + log_slave_updates=1 + relay_log_recovery = 1 + relay-log-purge = 1 + default_time_zone = '+08:00' + lower_case_table_names=1 + log_bin_trust_function_creators=1 + group_concat_max_len=67108864 + innodb_io_capacity = 4000 + innodb_io_capacity_max = 8000 + innodb_flush_sync = 0 + innodb_flush_neighbors = 0 + innodb_write_io_threads = 8 + innodb_read_io_threads = 8 + innodb_purge_threads = 4 + innodb_page_cleaners = 4 
+ innodb_open_files = 65535 + innodb_max_dirty_pages_pct = 50 + innodb_lru_scan_depth = 4000 + innodb_checksum_algorithm = crc32 + innodb_lock_wait_timeout = 10 + innodb_rollback_on_timeout = 1 + innodb_print_all_deadlocks = 1 + innodb_file_per_table = 1 + innodb_online_alter_log_max_size = 4G + innodb_stats_on_metadata = 0 + innodb_thread_concurrency = 0 + innodb_sync_spin_loops = 100 + innodb_spin_wait_delay = 30 + lock_wait_timeout = 3600 + slow_query_log = 1 + long_query_time = 10 + log_queries_not_using_indexes =1 + log_throttle_queries_not_using_indexes = 60 + min_examined_row_limit = 100 + log_slow_admin_statements = 1 + log_slow_slave_statements = 1 + default_authentication_plugin=mysql_native_password + skip-name-resolve=1 + explicit_defaults_for_timestamp=1 + plugin_dir=/opt/bitnami/mysql/plugin + max_allowed_packet=128M + max_connections = 2000 + max_connect_errors = 1000000 + table_definition_cache=2000 + table_open_cache_instances=64 + tablespace_definition_cache=1024 + thread_cache_size=256 + interactive_timeout = 600 + wait_timeout = 600 + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=32M + bind-address=0.0.0.0 + performance_schema = 1 + performance_schema_instrument = '%memory%=on' + performance_schema_instrument = '%lock%=on' + innodb_monitor_enable=ALL + + [mysql] + no-auto-rehash + + [mysqldump] + quick + max_allowed_packet = 32M + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mysql/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-mysql-init-scripts + namespace: bjtg + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: primary +data: + create_users_grants_core.sql: |- + create + user zyly@'%' identified by 
'Cmii@451315'; + grant select on *.* to zyly@'%'; + create + user zyly_qc@'%' identified by 'Uh)E_owCyb16'; + grant all + on *.* to zyly_qc@'%'; + create + user k8s_admin@'%' identified by 'fP#UaH6qQ3)8'; + grant all + on *.* to k8s_admin@'%'; + create + user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH'; + grant all + on *.* to audit_dba@'%'; + create + user db_backup@'%' identified by 'RU5Pu(4FGdT9'; + GRANT + SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT + on *.* to db_backup@'%'; + create + user monitor@'%' identified by 'PL3#nGtrWbf-'; + grant REPLICATION + CLIENT on *.* to monitor@'%'; + flush + privileges; +--- +kind: Service +apiVersion: v1 +metadata: + name: cmii-mysql + namespace: bjtg + labels: + app.kubernetes.io/component: primary + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: bjtg + cmii.app: mysql + cmii.type: middleware + octopus.control: mysql-db-wdd +spec: + ports: + - name: mysql + protocol: TCP + port: 13306 + targetPort: mysql + selector: + app.kubernetes.io/component: primary + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: bjtg + cmii.app: mysql + cmii.type: middleware + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mysql-headless + namespace: bjtg + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + annotations: { } +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mysql + port: 3306 + targetPort: mysql + selector: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: bjtg + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mysql + namespace: bjtg + labels: + app.kubernetes.io/name: mysql-db + 
octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + annotations: { } +spec: + type: NodePort + ports: + - name: mysql + port: 3306 + protocol: TCP + targetPort: mysql + nodePort: 33306 + selector: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: bjtg + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-mysql + namespace: bjtg + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: bjtg + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + serviceName: helm-mysql + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + spec: + imagePullSecrets: + - name: harborsecret + serviceAccountName: helm-mysql + affinity: { } + nodeSelector: + mysql-deploy: "true" + securityContext: + fsGroup: 1001 + initContainers: + - name: change-volume-permissions + image: 10.250.0.200:8033/cmii/bitnami-shell:11-debian-11-r136 + imagePullPolicy: "Always" + command: + - /bin/bash + - -ec + - | + chown -R 1001:1001 /bitnami/mysql + securityContext: + runAsUser: 0 + volumeMounts: + - name: mysql-data + mountPath: /bitnami/mysql + containers: + - name: mysql + image: 
10.250.0.200:8033/cmii/mysql:8.1.0-debian-11-r42 + imagePullPolicy: "IfNotPresent" + securityContext: + runAsUser: 1001 + env: + - name: BITNAMI_DEBUG + value: "true" + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: helm-mysql + key: mysql-root-password + - name: MYSQL_DATABASE + value: "cmii" + ports: + - name: mysql + containerPort: 3306 + livenessProbe: + failureThreshold: 5 + initialDelaySeconds: 120 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + readinessProbe: + failureThreshold: 5 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + resources: + limits: { } + requests: { } + volumeMounts: + - name: mysql-data + mountPath: /bitnami/mysql + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + - name: config + mountPath: /opt/bitnami/mysql/conf/my.cnf + subPath: my.cnf + volumes: + - name: config + configMap: + name: helm-mysql + - name: custom-init-scripts + configMap: + name: helm-mysql-init-scripts + - name: mysql-data + hostPath: + path: /var/lib/docker/mysql-pv/bjtg/ diff --git 
a/agent-common/real_project/bjtg/old/k8s-nacos.yaml b/agent-common/real_project/bjtg/old/k8s-nacos.yaml new file mode 100644 index 0000000..a80727b --- /dev/null +++ b/agent-common/real_project/bjtg/old/k8s-nacos.yaml @@ -0,0 +1,130 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-nacos-cm + namespace: bjtg + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 5.6.0 +data: + mysql.db.name: "cmii_nacos_config" + mysql.db.host: "helm-mysql" + mysql.port: "3306" + mysql.user: "k8s_admin" + mysql.password: "fP#UaH6qQ3)8" +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-nacos + namespace: bjtg + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 5.6.0 +spec: + type: NodePort + selector: + cmii.app: helm-nacos + cmii.type: middleware + ports: + - port: 8848 + name: server + targetPort: 8848 + nodePort: 38848 + - port: 9848 + name: server12 + targetPort: 9848 + - port: 9849 + name: server23 + targetPort: 9849 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-nacos + namespace: bjtg + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 5.6.0 +spec: + serviceName: helm-nacos + replicas: 1 + selector: + matchLabels: + cmii.app: helm-nacos + cmii.type: middleware + template: + metadata: + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/version: 5.6.0 + annotations: + pod.alpha.kubernetes.io/initialized: "true" + spec: + imagePullSecrets: + - name: harborsecret + affinity: { } + containers: + - name: nacos-server + image: 10.250.0.200:8033/cmii/nacos-server:v2.1.2 + ports: + - containerPort: 8848 + name: dashboard + - containerPort: 9848 + name: 
tcp-9848 + - containerPort: 9849 + name: tcp-9849 + env: + - name: NACOS_AUTH_ENABLE + value: "false" + - name: NACOS_REPLICAS + value: "1" + - name: MYSQL_SERVICE_DB_NAME + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.db.name + - name: MYSQL_SERVICE_PORT + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.port + - name: MYSQL_SERVICE_USER + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.user + - name: MYSQL_SERVICE_PASSWORD + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.password + - name: MYSQL_SERVICE_HOST + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.db.host + - name: NACOS_SERVER_PORT + value: "8848" + - name: NACOS_APPLICATION_PORT + value: "8848" + - name: PREFER_HOST_MODE + value: "hostname" + - name: MODE + value: standalone + - name: SPRING_DATASOURCE_PLATFORM + value: mysql +--- diff --git a/agent-common/real_project/bjtg/old/k8s-nfs-test.yaml b/agent-common/real_project/bjtg/old/k8s-nfs-test.yaml new file mode 100644 index 0000000..965765f --- /dev/null +++ b/agent-common/real_project/bjtg/old/k8s-nfs-test.yaml @@ -0,0 +1,38 @@ +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: test-claim + annotations: + volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" #与nfs-StorageClass.yaml metadata.name保持一致 +spec: + accessModes: + - ReadWriteOnce + storageClassName: nfs-prod-distribute + resources: + requests: + storage: 1Mi +--- +kind: Pod +apiVersion: v1 +metadata: + name: test-pod +spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: test-pod + image: 10.250.0.200:8033/cmii/busybox:latest + command: + - "/bin/sh" + args: + - "-c" + - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出 + volumeMounts: + - name: nfs-pvc + mountPath: "/mnt" + restartPolicy: "Never" + volumes: + - name: nfs-pvc + persistentVolumeClaim: + claimName: test-claim #与PVC名称保持一致 diff --git 
a/agent-common/real_project/bjtg/old/k8s-nfs.yaml b/agent-common/real_project/bjtg/old/k8s-nfs.yaml new file mode 100644 index 0000000..f65ac7e --- /dev/null +++ b/agent-common/real_project/bjtg/old/k8s-nfs.yaml @@ -0,0 +1,114 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system #根据实际环境设定namespace,下面类同 +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: nfs-client-provisioner-runner +rules: + - apiGroups: [ "" ] + resources: [ "persistentvolumes" ] + verbs: [ "get", "list", "watch", "create", "delete" ] + - apiGroups: [ "" ] + resources: [ "persistentvolumeclaims" ] + verbs: [ "get", "list", "watch", "update" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "storageclasses" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "create", "update", "patch" ] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: run-nfs-client-provisioner +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system +roleRef: + kind: ClusterRole + # name: nfs-client-provisioner-runner + name: cluster-admin + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system +rules: + - apiGroups: [ "" ] + resources: [ "endpoints" ] + verbs: [ "get", "list", "watch", "create", "update", "patch" ] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system +roleRef: + kind: Role + name: 
leader-locking-nfs-client-provisioner + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: nfs-prod-distribute +provisioner: cmlc-nfs-storage #这里的名称要和provisioner配置文件中的环境变量PROVISIONER_NAME保持一致parameters: archiveOnDelete: "false" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nfs-client-provisioner + labels: + app: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system #与RBAC文件中的namespace保持一致 +spec: + imagePullSecrets: + - name: harborsecret + replicas: 1 + selector: + matchLabels: + app: nfs-client-provisioner + strategy: + type: Recreate + template: + metadata: + labels: + app: nfs-client-provisioner + spec: + serviceAccountName: nfs-client-provisioner + containers: + - name: nfs-client-provisioner + image: 10.250.0.200:8033/cmii/nfs-subdir-external-provisioner:v4.0.2 + volumeMounts: + - name: nfs-client-root + mountPath: /persistentvolumes + env: + - name: PROVISIONER_NAME + value: cmlc-nfs-storage + - name: NFS_SERVER + value: + - name: NFS_PATH + value: /var/lib/docker/nfs_data + volumes: + - name: nfs-client-root + nfs: + server: + path: /var/lib/docker/nfs_data diff --git a/agent-common/real_project/bjtg/old/k8s-pvc.yaml b/agent-common/real_project/bjtg/old/k8s-pvc.yaml new file mode 100644 index 0000000..3cfa2a2 --- /dev/null +++ b/agent-common/real_project/bjtg/old/k8s-pvc.yaml @@ -0,0 +1,76 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: nfs-backend-log-pvc + namespace: bjtg + labels: + cmii.type: middleware-base + cmii.app: nfs-backend-log-pvc + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.6.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 100Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-emqxs + namespace: bjtg + labels: + cmii.type: 
middleware-base + cmii.app: helm-emqxs + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.6.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 20Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-mongo + namespace: bjtg + labels: + cmii.type: middleware-base + cmii.app: helm-mongo + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.6.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 30Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-rabbitmq + namespace: bjtg + labels: + cmii.type: middleware-base + cmii.app: helm-rabbitmq + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.6.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 20Gi diff --git a/agent-common/real_project/bjtg/old/k8s-rabbitmq.yaml b/agent-common/real_project/bjtg/old/k8s-rabbitmq.yaml new file mode 100644 index 0000000..5f5f7f2 --- /dev/null +++ b/agent-common/real_project/bjtg/old/k8s-rabbitmq.yaml @@ -0,0 +1,328 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-rabbitmq + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +automountServiceAccountToken: true +secrets: + - name: helm-rabbitmq +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-rabbitmq + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +type: Opaque +data: + rabbitmq-password: "blljUk45MXIuX2hq" + rabbitmq-erlang-cookie: 
"emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-rabbitmq-config + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +data: + rabbitmq.conf: |- + ## Username and password + ## + default_user = admin + default_pass = nYcRN91r._hj + ## Clustering + ## + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.cluster.local + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + # queue master locator + queue_master_locator = min-masters + # enable guest user + loopback_users.guest = false + #default_vhost = default-vhost + #disk_free_limit.absolute = 50MB + #load_definitions = /app/load_definition.json +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-rabbitmq-endpoint-reader + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +rules: + - apiGroups: [ "" ] + resources: [ "endpoints" ] + verbs: [ "get" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "create" ] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-rabbitmq-endpoint-reader + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +subjects: + - kind: ServiceAccount + name: helm-rabbitmq +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: helm-rabbitmq-endpoint-reader +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-rabbitmq-headless + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: 
rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +spec: + clusterIP: None + ports: + - name: epmd + port: 4369 + targetPort: epmd + - name: amqp + port: 5672 + targetPort: amqp + - name: dist + port: 25672 + targetPort: dist + - name: dashboard + port: 15672 + targetPort: stats + selector: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: bjtg + publishNotReadyAddresses: true +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-rabbitmq + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +spec: + type: NodePort + ports: + - name: amqp + port: 5672 + targetPort: amqp + nodePort: 35672 + - name: dashboard + port: 15672 + targetPort: dashboard + nodePort: 36675 + selector: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: bjtg +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-rabbitmq + namespace: bjtg + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq +spec: + serviceName: helm-rabbitmq-headless + podManagementPolicy: OrderedReady + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: bjtg + template: + metadata: + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: rabbitmq + annotations: + checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 + checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f + spec: + imagePullSecrets: + - name: harborsecret + serviceAccountName: helm-rabbitmq + affinity: { } + securityContext: + fsGroup: 5001 + runAsUser: 5001 + terminationGracePeriodSeconds: 120 + 
initContainers: + - name: volume-permissions + image: 10.250.0.200:8033/cmii/bitnami-shell:11-debian-11-r136 + imagePullPolicy: "Always" + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "/bitnami/rabbitmq/mnesia" + chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" + securityContext: + runAsUser: 0 + resources: + limits: { } + requests: { } + volumeMounts: + - name: data + mountPath: /bitnami/rabbitmq/mnesia + containers: + - name: rabbitmq + image: 10.250.0.200:8033/cmii/rabbitmq:3.9.12-debian-10-r3 + imagePullPolicy: "Always" + env: + - name: BITNAMI_DEBUG + value: "false" + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: "helm-rabbitmq-headless" + - name: K8S_ADDRESS_TYPE + value: hostname + - name: RABBITMQ_FORCE_BOOT + value: "no" + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" + - name: RABBITMQ_MNESIA_DIR + value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: "no" + - name: RABBITMQ_LOGS + value: "-" + - name: RABBITMQ_ULIMIT_NOFILES + value: "65536" + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: helm-rabbitmq + key: rabbitmq-erlang-cookie + - name: RABBITMQ_LOAD_DEFINITIONS + value: "no" + - name: RABBITMQ_SECURE_PASSWORD + value: "yes" + - name: RABBITMQ_USERNAME + value: "admin" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: helm-rabbitmq + key: rabbitmq-password + - name: RABBITMQ_PLUGINS + value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" + ports: + - name: amqp 
+ containerPort: 5672 + - name: dist + containerPort: 25672 + - name: dashboard + containerPort: 15672 + - name: epmd + containerPort: 4369 + livenessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q ping + initialDelaySeconds: 120 + periodSeconds: 30 + timeoutSeconds: 20 + successThreshold: 1 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + initialDelaySeconds: 10 + periodSeconds: 30 + timeoutSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" + else + rabbitmqctl stop_app + fi + resources: + limits: { } + requests: { } + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: data + mountPath: /bitnami/rabbitmq/mnesia + volumes: + - name: configuration + configMap: + name: helm-rabbitmq-config + items: + - key: rabbitmq.conf + path: rabbitmq.conf + - name: data + persistentVolumeClaim: + claimName: helm-rabbitmq diff --git a/agent-common/real_project/bjtg/old/k8s-redis.yaml b/agent-common/real_project/bjtg/old/k8s-redis.yaml new file mode 100644 index 0000000..107484a --- /dev/null +++ b/agent-common/real_project/bjtg/old/k8s-redis.yaml @@ -0,0 +1,585 @@ +--- +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: helm-redis + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-redis + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus +type: Opaque +data: + 
redis-password: "TWNhY2hlQDQ1MjI=" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-configuration + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus +data: + redis.conf: |- + # User-supplied common configuration: + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + # End of common configuration + master.conf: |- + dir /data + # User-supplied master configuration: + rename-command FLUSHDB "" + rename-command FLUSHALL "" + # End of master configuration + replica.conf: |- + dir /data + slave-read-only yes + # User-supplied replica configuration: + rename-command FLUSHDB "" + rename-command FLUSHALL "" + # End of replica configuration +--- +# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-health + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus +data: + ping_readiness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo 
"$response" + exit 1 + fi + ping_readiness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status +--- +# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-scripts + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus +data: + start-master.sh: | + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + if [[ ! 
-f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + exec redis-server "${ARGS[@]}" + start-replica.sh: | + #!/bin/bash + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo 26379 + ;; + "REDIS") + echo 6379 + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + echo "${hostname}.${HEADLESS_SERVICE}" + } + + REDISPORT=$(get_port "$HOSTNAME" "REDIS") + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + + echo "" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + exec redis-server "${ARGS[@]}" +--- +# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: helm-redis-headless + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: bjtg +--- +# Source: outside-deploy/charts/redis-db/templates/master/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: helm-redis-master + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +spec: + type: ClusterIP + + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + nodePort: null + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: bjtg + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +--- +# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml +apiVersion: v1 +kind: Service 
+metadata: + name: helm-redis-replicas + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica +spec: + type: ClusterIP + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + nodePort: null + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: bjtg + app.kubernetes.io/component: replica +--- +# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-redis-master + namespace: bjtg + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: bjtg + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master + serviceName: helm-redis-headless + updateStrategy: + rollingUpdate: { } + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master + annotations: + checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 + checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 + checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 + checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d + spec: + affinity: { } + securityContext: + fsGroup: 1001 + serviceAccountName: helm-redis + imagePullSecrets: + - name: harborsecret + terminationGracePeriodSeconds: 30 + containers: + - name: redis + image: 
10.250.0.200:8033/cmii/redis:6.2.6-debian-10-r0 + imagePullPolicy: "Always" + securityContext: + runAsUser: 1001 + command: + - /bin/bash + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: REDIS_REPLICATION_MODE + value: master + - name: ALLOW_EMPTY_PASSWORD + value: "no" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_TLS_ENABLED + value: "no" + - name: REDIS_PORT + value: "6379" + ports: + - name: redis + containerPort: 6379 + livenessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + # One second longer than command timeout should prevent generation of zombie processes. + timeoutSeconds: 6 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh 5 + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 2 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh 1 + resources: + limits: + cpu: "2" + memory: 8Gi + requests: + cpu: "100m" + memory: 1Gi + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + - name: redis-data + mountPath: /data + subPath: + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + - name: tmp + mountPath: /tmp + volumes: + - name: start-scripts + configMap: + name: helm-redis-scripts + defaultMode: 0755 + - name: health + configMap: + name: helm-redis-health + defaultMode: 0755 + - name: config + configMap: + name: helm-redis-configuration + - name: redis-tmp-conf + emptyDir: { } + - name: tmp + emptyDir: { } + - name: redis-data + emptyDir: { } +--- +# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-redis-replicas + namespace: bjtg + 
labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: bjtg + app.kubernetes.io/component: replica + serviceName: helm-redis-headless + updateStrategy: + rollingUpdate: { } + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: bjtg + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica + annotations: + checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 + checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 + checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 + checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d + spec: + imagePullSecrets: + - name: harborsecret + securityContext: + fsGroup: 1001 + serviceAccountName: helm-redis + terminationGracePeriodSeconds: 30 + containers: + - name: redis + image: 10.250.0.200:8033/cmii/redis:6.2.6-debian-10-r0 + imagePullPolicy: "Always" + securityContext: + runAsUser: 1001 + command: + - /bin/bash + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-replica.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: helm-redis-master-0.helm-redis-headless.bjtg.svc.cluster.local + - name: REDIS_MASTER_PORT_NUMBER + value: "6379" + - name: ALLOW_EMPTY_PASSWORD + value: "no" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_TLS_ENABLED + value: "no" + - name: REDIS_PORT + value: "6379" + ports: + - name: 
redis + containerPort: 6379 + livenessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 6 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh 5 + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 2 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh 1 + resources: + limits: + cpu: "2" + memory: 8Gi + requests: + cpu: "100m" + memory: 1Gi + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + - name: redis-data + mountPath: /data + subPath: + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + volumes: + - name: start-scripts + configMap: + name: helm-redis-scripts + defaultMode: 0755 + - name: health + configMap: + name: helm-redis-health + defaultMode: 0755 + - name: config + configMap: + name: helm-redis-configuration + - name: redis-tmp-conf + emptyDir: { } + - name: redis-data + emptyDir: { } + diff --git a/agent-common/real_project/bjtg/old/k8s-srs.yaml b/agent-common/real_project/bjtg/old/k8s-srs.yaml new file mode 100644 index 0000000..6811efd --- /dev/null +++ b/agent-common/real_project/bjtg/old/k8s-srs.yaml @@ -0,0 +1,496 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: helm-live-srs-cm + namespace: bjtg + labels: + cmii.app: live-srs + cmii.type: live + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + helm.sh/chart: cmlc-live-srs-rtc-2.0.0 +data: + srs.rtc.conf: |- + listen 30935; + max_connections 4096; + srs_log_tank console; + srs_log_level info; + srs_log_file /home/srs.log; + daemon off; + http_api { + enabled on; + listen 1985; + crossdomain on; + } + stats { + network 0; + } + http_server { + enabled on; + listen 8080; + dir /home/hls; + } + srt_server { + enabled on; + listen 30556; + maxbw 
1000000000; + connect_timeout 4000; + peerlatency 600; + recvlatency 600; + } + rtc_server { + enabled on; + listen 30090; + candidate $CANDIDATE; + } + vhost __defaultVhost__ { + http_hooks { + enabled on; + on_publish http://helm-live-op-svc-v2:8080/hooks/on_push; + } + http_remux { + enabled on; + } + rtc { + enabled on; + rtmp_to_rtc on; + rtc_to_rtmp on; + keep_bframe off; + } + tcp_nodelay on; + min_latency on; + play { + gop_cache off; + mw_latency 100; + mw_msgs 10; + } + publish { + firstpkt_timeout 8000; + normal_timeout 4000; + mr on; + } + dvr { + enabled off; + dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4; + dvr_plan session; + } + hls { + enabled on; + hls_path /home/hls; + hls_fragment 10; + hls_window 60; + hls_m3u8_file [app]/[stream].m3u8; + hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts; + hls_cleanup on; + hls_entry_prefix http://10.250.0.200:8888; + } + } +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-srs-svc-exporter + namespace: bjtg + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - name: rtmp + protocol: TCP + port: 30935 + targetPort: 30935 + nodePort: 30935 + - name: rtc + protocol: UDP + port: 30090 + targetPort: 30090 + nodePort: 30090 + - name: rtc-tcp + protocol: TCP + port: 30090 + targetPort: 30090 + nodePort: 30090 + - name: srt + protocol: UDP + port: 30556 + targetPort: 30556 + nodePort: 30556 + - name: api + protocol: TCP + port: 1985 + targetPort: 1985 + nodePort: 30557 + selector: + srs-role: rtc + type: NodePort + sessionAffinity: None + externalTrafficPolicy: Cluster + +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-srs-svc + namespace: bjtg + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 + - name: api + protocol: TCP + port: 1985 + targetPort: 1985 + selector: + srs-role: rtc + type: ClusterIP + 
sessionAffinity: None + +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-srsrtc-svc + namespace: bjtg + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - name: rtmp + protocol: TCP + port: 30935 + targetPort: 30935 + selector: + srs-role: rtc + type: ClusterIP + sessionAffinity: None + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: helm-live-srs-rtc + namespace: bjtg + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + cmii.app: live-srs + cmii.type: live + helm.sh/chart: cmlc-live-srs-rtc-2.0.0 + srs-role: rtc +spec: + replicas: 1 + selector: + matchLabels: + srs-role: rtc + template: + metadata: + labels: + srs-role: rtc + spec: + volumes: + - name: srs-conf-file + configMap: + name: helm-live-srs-cm + items: + - key: srs.rtc.conf + path: docker.conf + defaultMode: 420 + - name: srs-vol + emptyDir: + sizeLimit: 8Gi + containers: + - name: srs-rtc + image: 10.250.0.200:8033/cmii/srs:v5.0.195 + ports: + - name: srs-rtmp + containerPort: 30935 + protocol: TCP + - name: srs-api + containerPort: 1985 + protocol: TCP + - name: srs-flv + containerPort: 8080 + protocol: TCP + - name: srs-webrtc + containerPort: 30090 + protocol: UDP + - name: srs-webrtc-tcp + containerPort: 30090 + protocol: TCP + - name: srs-srt + containerPort: 30556 + protocol: UDP + env: + - name: CANDIDATE + value: 10.250.0.200 + resources: + limits: + cpu: 2000m + memory: 4Gi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: srs-conf-file + mountPath: /usr/local/srs/conf/docker.conf + subPath: docker.conf + - name: srs-vol + mountPath: /home/dvr + subPath: bjtg/helm-live/dvr + - name: srs-vol + mountPath: /home/hls + subPath: bjtg/helm-live/hls + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + - name: oss-adaptor + image: 10.250.0.200:8033/cmii/cmii-srs-oss-adaptor:2023-SA + env: + - name: OSS_ENDPOINT + value: 'http://:9000' + - 
name: OSS_AK + value: cmii + - name: OSS_SK + value: 'B#923fC7mk' + - name: OSS_BUCKET + value: live-cluster-hls + - name: SRS_OP + value: 'http://helm-live-op-svc-v2:8080' + - name: MYSQL_ENDPOINT + value: 'helm-mysql:3306' + - name: MYSQL_USERNAME + value: k8s_admin + - name: MYSQL_PASSWORD + value: fP#UaH6qQ3)8 + - name: MYSQL_DATABASE + value: cmii_live_srs_op + - name: MYSQL_TABLE + value: live_segment + - name: LOG_LEVEL + value: info + - name: OSS_META + value: 'yes' + resources: + limits: + cpu: 2000m + memory: 4Gi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: srs-vol + mountPath: /cmii/share/hls + subPath: bjtg/helm-live/hls + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: { } + imagePullSecrets: + - name: harborsecret + affinity: { } + schedulerName: default-scheduler + serviceName: helm-live-srsrtc-svc + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: 0 + revisionHistoryLimit: 10 +--- +# live-srs部分 +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: helm-live-op-v2 + namespace: bjtg + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + cmii.app: live-engine + cmii.type: live + helm.sh/chart: cmlc-live-live-op-2.0.0 + live-role: op-v2 +spec: + replicas: 1 + selector: + matchLabels: + live-role: op-v2 + template: + metadata: + labels: + live-role: op-v2 + spec: + volumes: + - name: srs-conf-file + configMap: + name: helm-live-op-cm-v2 + items: + - key: live.op.conf + path: bootstrap.yaml + defaultMode: 420 + containers: + - name: helm-live-op-v2 + image: 10.250.0.200:8033/cmii/cmii-live-operator:5.2.0 + ports: + - name: operator + containerPort: 8080 + protocol: TCP + resources: + limits: + cpu: 4800m + memory: 4Gi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: 
srs-conf-file + mountPath: /cmii/bootstrap.yaml + subPath: bootstrap.yaml + livenessProbe: + httpGet: + path: /cmii/health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: { } + imagePullSecrets: + - name: harborsecret + affinity: { } + schedulerName: default-scheduler + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 25% + maxSurge: 25% + revisionHistoryLimit: 10 + progressDeadlineSeconds: 600 +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-op-svc-v2 + namespace: bjtg + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 + nodePort: 30333 + selector: + live-role: op-v2 + type: NodePort + sessionAffinity: None +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-op-svc + namespace: bjtg + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 + selector: + live-role: op + type: ClusterIP + sessionAffinity: None +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: helm-live-op-cm-v2 + namespace: bjtg + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + cmii.app: live-engine + cmii.type: live +data: + live.op.conf: |- + server: + port: 8080 + spring: + main: + allow-bean-definition-overriding: true + allow-circular-references: true + application: + name: cmii-live-operator + platform: + info: + name: cmii-live-operator + description: cmii-live-operator 
+ version: 5.6.0 + scanPackage: com.cmii.live.op + cloud: + nacos: + config: + username: developer + password: N@cos14Good + server-addr: helm-nacos:8848 + extension-configs: + - data-id: cmii-live-operator.yml + group: 5.6.0 + refresh: true + shared-configs: + - data-id: cmii-backend-system.yml + group: 5.6.0 + refresh: true + discovery: + enabled: false + + live: + engine: + type: srs + endpoint: 'http://helm-live-srs-svc:1985' + proto: + rtmp: 'rtmp://10.250.0.200:30935' + rtsp: 'rtsp://10.250.0.200:30554' + srt: 'srt://10.250.0.200:30556' + flv: 'http://10.250.0.200:30500' + hls: 'http://10.250.0.200:30500' + rtc: 'webrtc://10.250.0.200:30090' + replay: 'https://10.250.0.200:30333' + minio: + endpoint: http://:9000 + access-key: cmii + secret-key: B#923fC7mk + bucket: live-cluster-hls diff --git a/agent-operator/real_project/cqga/0-install_all_demand.sh b/agent-common/real_project/cqga/0-install_all_demand.sh similarity index 100% rename from agent-operator/real_project/cqga/0-install_all_demand.sh rename to agent-common/real_project/cqga/0-install_all_demand.sh diff --git a/agent-operator/real_project/cqga/1.mount_volume.sh b/agent-common/real_project/cqga/1.mount_volume.sh similarity index 100% rename from agent-operator/real_project/cqga/1.mount_volume.sh rename to agent-common/real_project/cqga/1.mount_volume.sh diff --git a/agent-operator/real_project/cqga/2-harbor-docker-compose.yaml b/agent-common/real_project/cqga/2-harbor-docker-compose.yaml similarity index 100% rename from agent-operator/real_project/cqga/2-harbor-docker-compose.yaml rename to agent-common/real_project/cqga/2-harbor-docker-compose.yaml diff --git a/agent-operator/real_project/cqga/3-rke-cluster.yml b/agent-common/real_project/cqga/3-rke-cluster.yml similarity index 100% rename from agent-operator/real_project/cqga/3-rke-cluster.yml rename to agent-common/real_project/cqga/3-rke-cluster.yml diff --git a/agent-operator/real_project/cqga/operator.go 
b/agent-common/real_project/cqga/operator.go similarity index 100% rename from agent-operator/real_project/cqga/operator.go rename to agent-common/real_project/cqga/operator.go diff --git a/agent-operator/real_project/cqga/project-info.txt b/agent-common/real_project/cqga/project-info.txt similarity index 100% rename from agent-operator/real_project/cqga/project-info.txt rename to agent-common/real_project/cqga/project-info.txt diff --git a/agent-operator/real_project/octopus-agent-run.txt b/agent-common/real_project/octopus-agent-run.txt similarity index 100% rename from agent-operator/real_project/octopus-agent-run.txt rename to agent-common/real_project/octopus-agent-run.txt diff --git a/agent-operator/real_project/proxy_project/linux/port_linux_amd64 b/agent-common/real_project/proxy_project/linux/port_linux_amd64 similarity index 100% rename from agent-operator/real_project/proxy_project/linux/port_linux_amd64 rename to agent-common/real_project/proxy_project/linux/port_linux_amd64 diff --git a/agent-operator/real_project/proxy_project/linux/socks5_linux_amd64 b/agent-common/real_project/proxy_project/linux/socks5_linux_amd64 similarity index 100% rename from agent-operator/real_project/proxy_project/linux/socks5_linux_amd64 rename to agent-common/real_project/proxy_project/linux/socks5_linux_amd64 diff --git a/agent-operator/real_project/proxy_project/windows/port_win64.exe b/agent-common/real_project/proxy_project/windows/port_win64.exe similarity index 100% rename from agent-operator/real_project/proxy_project/windows/port_win64.exe rename to agent-common/real_project/proxy_project/windows/port_win64.exe diff --git a/agent-operator/real_project/proxy_project/windows/socks5_win64.exe b/agent-common/real_project/proxy_project/windows/socks5_win64.exe similarity index 100% rename from agent-operator/real_project/proxy_project/windows/socks5_win64.exe rename to agent-common/real_project/proxy_project/windows/socks5_win64.exe diff --git 
a/agent-operator/real_project/szga/Config.go b/agent-common/real_project/szga/Config.go similarity index 100% rename from agent-operator/real_project/szga/Config.go rename to agent-common/real_project/szga/Config.go diff --git a/agent-deploy/uavcloud-dev/k8s-backend.yaml b/agent-common/real_project/uavcloud-dev/k8s-backend.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/k8s-backend.yaml rename to agent-common/real_project/uavcloud-dev/k8s-backend.yaml diff --git a/agent-deploy/uavcloud-dev/k8s-configmap.yaml b/agent-common/real_project/uavcloud-dev/k8s-configmap.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/k8s-configmap.yaml rename to agent-common/real_project/uavcloud-dev/k8s-configmap.yaml diff --git a/agent-deploy/uavcloud-dev/k8s-emqx.yaml b/agent-common/real_project/uavcloud-dev/k8s-emqx.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/k8s-emqx.yaml rename to agent-common/real_project/uavcloud-dev/k8s-emqx.yaml diff --git a/agent-deploy/uavcloud-dev/k8s-frontend.yaml b/agent-common/real_project/uavcloud-dev/k8s-frontend.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/k8s-frontend.yaml rename to agent-common/real_project/uavcloud-dev/k8s-frontend.yaml diff --git a/agent-deploy/uavcloud-dev/k8s-ingress.yaml b/agent-common/real_project/uavcloud-dev/k8s-ingress.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/k8s-ingress.yaml rename to agent-common/real_project/uavcloud-dev/k8s-ingress.yaml diff --git a/agent-deploy/uavcloud-dev/k8s-mongo.yaml b/agent-common/real_project/uavcloud-dev/k8s-mongo.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/k8s-mongo.yaml rename to agent-common/real_project/uavcloud-dev/k8s-mongo.yaml diff --git a/agent-deploy/uavcloud-dev/k8s-mysql.yaml b/agent-common/real_project/uavcloud-dev/k8s-mysql.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/k8s-mysql.yaml rename to agent-common/real_project/uavcloud-dev/k8s-mysql.yaml 
diff --git a/agent-deploy/uavcloud-dev/k8s-nacos.yaml b/agent-common/real_project/uavcloud-dev/k8s-nacos.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/k8s-nacos.yaml rename to agent-common/real_project/uavcloud-dev/k8s-nacos.yaml diff --git a/agent-deploy/uavcloud-dev/k8s-pvc.yaml b/agent-common/real_project/uavcloud-dev/k8s-pvc.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/k8s-pvc.yaml rename to agent-common/real_project/uavcloud-dev/k8s-pvc.yaml diff --git a/agent-deploy/uavcloud-dev/k8s-rabbitmq.yaml b/agent-common/real_project/uavcloud-dev/k8s-rabbitmq.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/k8s-rabbitmq.yaml rename to agent-common/real_project/uavcloud-dev/k8s-rabbitmq.yaml diff --git a/agent-deploy/uavcloud-dev/k8s-redis.yaml b/agent-common/real_project/uavcloud-dev/k8s-redis.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/k8s-redis.yaml rename to agent-common/real_project/uavcloud-dev/k8s-redis.yaml diff --git a/agent-deploy/uavcloud-dev/k8s-srs.yaml b/agent-common/real_project/uavcloud-dev/k8s-srs.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/k8s-srs.yaml rename to agent-common/real_project/uavcloud-dev/k8s-srs.yaml diff --git a/agent-deploy/uavcloud-dev/old/k8s-backend.yaml b/agent-common/real_project/uavcloud-dev/old/k8s-backend.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/old/k8s-backend.yaml rename to agent-common/real_project/uavcloud-dev/old/k8s-backend.yaml diff --git a/agent-deploy/uavcloud-dev/old/k8s-configmap.yaml b/agent-common/real_project/uavcloud-dev/old/k8s-configmap.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/old/k8s-configmap.yaml rename to agent-common/real_project/uavcloud-dev/old/k8s-configmap.yaml diff --git a/agent-deploy/uavcloud-dev/old/k8s-emqx.yaml b/agent-common/real_project/uavcloud-dev/old/k8s-emqx.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/old/k8s-emqx.yaml rename to 
agent-common/real_project/uavcloud-dev/old/k8s-emqx.yaml diff --git a/agent-deploy/uavcloud-dev/old/k8s-frontend.yaml b/agent-common/real_project/uavcloud-dev/old/k8s-frontend.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/old/k8s-frontend.yaml rename to agent-common/real_project/uavcloud-dev/old/k8s-frontend.yaml diff --git a/agent-deploy/uavcloud-dev/old/k8s-ingress.yaml b/agent-common/real_project/uavcloud-dev/old/k8s-ingress.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/old/k8s-ingress.yaml rename to agent-common/real_project/uavcloud-dev/old/k8s-ingress.yaml diff --git a/agent-deploy/uavcloud-dev/old/k8s-mongo.yaml b/agent-common/real_project/uavcloud-dev/old/k8s-mongo.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/old/k8s-mongo.yaml rename to agent-common/real_project/uavcloud-dev/old/k8s-mongo.yaml diff --git a/agent-deploy/uavcloud-dev/old/k8s-mysql.yaml b/agent-common/real_project/uavcloud-dev/old/k8s-mysql.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/old/k8s-mysql.yaml rename to agent-common/real_project/uavcloud-dev/old/k8s-mysql.yaml diff --git a/agent-deploy/uavcloud-dev/old/k8s-nacos.yaml b/agent-common/real_project/uavcloud-dev/old/k8s-nacos.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/old/k8s-nacos.yaml rename to agent-common/real_project/uavcloud-dev/old/k8s-nacos.yaml diff --git a/agent-deploy/uavcloud-dev/old/k8s-pvc.yaml b/agent-common/real_project/uavcloud-dev/old/k8s-pvc.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/old/k8s-pvc.yaml rename to agent-common/real_project/uavcloud-dev/old/k8s-pvc.yaml diff --git a/agent-deploy/uavcloud-dev/old/k8s-rabbitmq.yaml b/agent-common/real_project/uavcloud-dev/old/k8s-rabbitmq.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/old/k8s-rabbitmq.yaml rename to agent-common/real_project/uavcloud-dev/old/k8s-rabbitmq.yaml diff --git a/agent-deploy/uavcloud-dev/old/k8s-redis.yaml 
b/agent-common/real_project/uavcloud-dev/old/k8s-redis.yaml similarity index 100% rename from agent-deploy/uavcloud-dev/old/k8s-redis.yaml rename to agent-common/real_project/uavcloud-dev/old/k8s-redis.yaml diff --git a/agent-deploy/uavcloud-devflight/k8s-backend.yaml b/agent-common/real_project/uavcloud-devflight/k8s-backend.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/k8s-backend.yaml rename to agent-common/real_project/uavcloud-devflight/k8s-backend.yaml diff --git a/agent-deploy/uavcloud-devflight/k8s-configmap.yaml b/agent-common/real_project/uavcloud-devflight/k8s-configmap.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/k8s-configmap.yaml rename to agent-common/real_project/uavcloud-devflight/k8s-configmap.yaml diff --git a/agent-deploy/uavcloud-devflight/k8s-emqx.yaml b/agent-common/real_project/uavcloud-devflight/k8s-emqx.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/k8s-emqx.yaml rename to agent-common/real_project/uavcloud-devflight/k8s-emqx.yaml diff --git a/agent-deploy/uavcloud-devflight/k8s-frontend.yaml b/agent-common/real_project/uavcloud-devflight/k8s-frontend.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/k8s-frontend.yaml rename to agent-common/real_project/uavcloud-devflight/k8s-frontend.yaml diff --git a/agent-deploy/uavcloud-devflight/k8s-ingress.yaml b/agent-common/real_project/uavcloud-devflight/k8s-ingress.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/k8s-ingress.yaml rename to agent-common/real_project/uavcloud-devflight/k8s-ingress.yaml diff --git a/agent-deploy/uavcloud-devflight/k8s-mongo.yaml b/agent-common/real_project/uavcloud-devflight/k8s-mongo.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/k8s-mongo.yaml rename to agent-common/real_project/uavcloud-devflight/k8s-mongo.yaml diff --git a/agent-deploy/uavcloud-devflight/k8s-mysql.yaml 
b/agent-common/real_project/uavcloud-devflight/k8s-mysql.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/k8s-mysql.yaml rename to agent-common/real_project/uavcloud-devflight/k8s-mysql.yaml diff --git a/agent-deploy/uavcloud-devflight/k8s-nacos.yaml b/agent-common/real_project/uavcloud-devflight/k8s-nacos.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/k8s-nacos.yaml rename to agent-common/real_project/uavcloud-devflight/k8s-nacos.yaml diff --git a/agent-deploy/uavcloud-devflight/k8s-pvc.yaml b/agent-common/real_project/uavcloud-devflight/k8s-pvc.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/k8s-pvc.yaml rename to agent-common/real_project/uavcloud-devflight/k8s-pvc.yaml diff --git a/agent-deploy/uavcloud-devflight/k8s-rabbitmq.yaml b/agent-common/real_project/uavcloud-devflight/k8s-rabbitmq.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/k8s-rabbitmq.yaml rename to agent-common/real_project/uavcloud-devflight/k8s-rabbitmq.yaml diff --git a/agent-deploy/uavcloud-devflight/k8s-redis.yaml b/agent-common/real_project/uavcloud-devflight/k8s-redis.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/k8s-redis.yaml rename to agent-common/real_project/uavcloud-devflight/k8s-redis.yaml diff --git a/agent-deploy/uavcloud-devflight/k8s-srs.yaml b/agent-common/real_project/uavcloud-devflight/k8s-srs.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/k8s-srs.yaml rename to agent-common/real_project/uavcloud-devflight/k8s-srs.yaml diff --git a/agent-deploy/uavcloud-devflight/old/k8s-backend.yaml b/agent-common/real_project/uavcloud-devflight/old/k8s-backend.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/old/k8s-backend.yaml rename to agent-common/real_project/uavcloud-devflight/old/k8s-backend.yaml diff --git a/agent-deploy/uavcloud-devflight/old/k8s-configmap.yaml 
b/agent-common/real_project/uavcloud-devflight/old/k8s-configmap.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/old/k8s-configmap.yaml rename to agent-common/real_project/uavcloud-devflight/old/k8s-configmap.yaml diff --git a/agent-deploy/uavcloud-devflight/old/k8s-emqx.yaml b/agent-common/real_project/uavcloud-devflight/old/k8s-emqx.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/old/k8s-emqx.yaml rename to agent-common/real_project/uavcloud-devflight/old/k8s-emqx.yaml diff --git a/agent-deploy/uavcloud-devflight/old/k8s-frontend.yaml b/agent-common/real_project/uavcloud-devflight/old/k8s-frontend.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/old/k8s-frontend.yaml rename to agent-common/real_project/uavcloud-devflight/old/k8s-frontend.yaml diff --git a/agent-deploy/uavcloud-devflight/old/k8s-ingress.yaml b/agent-common/real_project/uavcloud-devflight/old/k8s-ingress.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/old/k8s-ingress.yaml rename to agent-common/real_project/uavcloud-devflight/old/k8s-ingress.yaml diff --git a/agent-deploy/uavcloud-devflight/old/k8s-mongo.yaml b/agent-common/real_project/uavcloud-devflight/old/k8s-mongo.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/old/k8s-mongo.yaml rename to agent-common/real_project/uavcloud-devflight/old/k8s-mongo.yaml diff --git a/agent-deploy/uavcloud-devflight/old/k8s-mysql.yaml b/agent-common/real_project/uavcloud-devflight/old/k8s-mysql.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/old/k8s-mysql.yaml rename to agent-common/real_project/uavcloud-devflight/old/k8s-mysql.yaml diff --git a/agent-deploy/uavcloud-devflight/old/k8s-nacos.yaml b/agent-common/real_project/uavcloud-devflight/old/k8s-nacos.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/old/k8s-nacos.yaml rename to agent-common/real_project/uavcloud-devflight/old/k8s-nacos.yaml diff --git 
a/agent-deploy/uavcloud-devflight/old/k8s-pvc.yaml b/agent-common/real_project/uavcloud-devflight/old/k8s-pvc.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/old/k8s-pvc.yaml rename to agent-common/real_project/uavcloud-devflight/old/k8s-pvc.yaml diff --git a/agent-deploy/uavcloud-devflight/old/k8s-rabbitmq.yaml b/agent-common/real_project/uavcloud-devflight/old/k8s-rabbitmq.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/old/k8s-rabbitmq.yaml rename to agent-common/real_project/uavcloud-devflight/old/k8s-rabbitmq.yaml diff --git a/agent-deploy/uavcloud-devflight/old/k8s-redis.yaml b/agent-common/real_project/uavcloud-devflight/old/k8s-redis.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/old/k8s-redis.yaml rename to agent-common/real_project/uavcloud-devflight/old/k8s-redis.yaml diff --git a/agent-deploy/uavcloud-devflight/old/k8s-srs.yaml b/agent-common/real_project/uavcloud-devflight/old/k8s-srs.yaml similarity index 100% rename from agent-deploy/uavcloud-devflight/old/k8s-srs.yaml rename to agent-common/real_project/uavcloud-devflight/old/k8s-srs.yaml diff --git a/agent-deploy/uavcloud-devoperation/k8s-backend.yaml b/agent-common/real_project/uavcloud-devoperation/k8s-backend.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/k8s-backend.yaml rename to agent-common/real_project/uavcloud-devoperation/k8s-backend.yaml diff --git a/agent-deploy/uavcloud-devoperation/k8s-configmap.yaml b/agent-common/real_project/uavcloud-devoperation/k8s-configmap.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/k8s-configmap.yaml rename to agent-common/real_project/uavcloud-devoperation/k8s-configmap.yaml diff --git a/agent-deploy/uavcloud-devoperation/k8s-emqx.yaml b/agent-common/real_project/uavcloud-devoperation/k8s-emqx.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/k8s-emqx.yaml rename to 
agent-common/real_project/uavcloud-devoperation/k8s-emqx.yaml diff --git a/agent-deploy/uavcloud-devoperation/k8s-frontend.yaml b/agent-common/real_project/uavcloud-devoperation/k8s-frontend.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/k8s-frontend.yaml rename to agent-common/real_project/uavcloud-devoperation/k8s-frontend.yaml diff --git a/agent-deploy/uavcloud-devoperation/k8s-ingress.yaml b/agent-common/real_project/uavcloud-devoperation/k8s-ingress.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/k8s-ingress.yaml rename to agent-common/real_project/uavcloud-devoperation/k8s-ingress.yaml diff --git a/agent-deploy/uavcloud-devoperation/k8s-mongo.yaml b/agent-common/real_project/uavcloud-devoperation/k8s-mongo.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/k8s-mongo.yaml rename to agent-common/real_project/uavcloud-devoperation/k8s-mongo.yaml diff --git a/agent-deploy/uavcloud-devoperation/k8s-mysql.yaml b/agent-common/real_project/uavcloud-devoperation/k8s-mysql.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/k8s-mysql.yaml rename to agent-common/real_project/uavcloud-devoperation/k8s-mysql.yaml diff --git a/agent-deploy/uavcloud-devoperation/k8s-nacos.yaml b/agent-common/real_project/uavcloud-devoperation/k8s-nacos.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/k8s-nacos.yaml rename to agent-common/real_project/uavcloud-devoperation/k8s-nacos.yaml diff --git a/agent-deploy/uavcloud-devoperation/k8s-pvc.yaml b/agent-common/real_project/uavcloud-devoperation/k8s-pvc.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/k8s-pvc.yaml rename to agent-common/real_project/uavcloud-devoperation/k8s-pvc.yaml diff --git a/agent-deploy/uavcloud-devoperation/k8s-rabbitmq.yaml b/agent-common/real_project/uavcloud-devoperation/k8s-rabbitmq.yaml similarity index 100% rename from 
agent-deploy/uavcloud-devoperation/k8s-rabbitmq.yaml rename to agent-common/real_project/uavcloud-devoperation/k8s-rabbitmq.yaml diff --git a/agent-deploy/uavcloud-devoperation/k8s-redis.yaml b/agent-common/real_project/uavcloud-devoperation/k8s-redis.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/k8s-redis.yaml rename to agent-common/real_project/uavcloud-devoperation/k8s-redis.yaml diff --git a/agent-deploy/uavcloud-devoperation/k8s-srs.yaml b/agent-common/real_project/uavcloud-devoperation/k8s-srs.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/k8s-srs.yaml rename to agent-common/real_project/uavcloud-devoperation/k8s-srs.yaml diff --git a/agent-deploy/uavcloud-devoperation/old/k8s-backend.yaml b/agent-common/real_project/uavcloud-devoperation/old/k8s-backend.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/old/k8s-backend.yaml rename to agent-common/real_project/uavcloud-devoperation/old/k8s-backend.yaml diff --git a/agent-deploy/uavcloud-devoperation/old/k8s-configmap.yaml b/agent-common/real_project/uavcloud-devoperation/old/k8s-configmap.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/old/k8s-configmap.yaml rename to agent-common/real_project/uavcloud-devoperation/old/k8s-configmap.yaml diff --git a/agent-deploy/uavcloud-devoperation/old/k8s-emqx.yaml b/agent-common/real_project/uavcloud-devoperation/old/k8s-emqx.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/old/k8s-emqx.yaml rename to agent-common/real_project/uavcloud-devoperation/old/k8s-emqx.yaml diff --git a/agent-deploy/uavcloud-devoperation/old/k8s-frontend.yaml b/agent-common/real_project/uavcloud-devoperation/old/k8s-frontend.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/old/k8s-frontend.yaml rename to agent-common/real_project/uavcloud-devoperation/old/k8s-frontend.yaml diff --git a/agent-deploy/uavcloud-devoperation/old/k8s-ingress.yaml 
b/agent-common/real_project/uavcloud-devoperation/old/k8s-ingress.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/old/k8s-ingress.yaml rename to agent-common/real_project/uavcloud-devoperation/old/k8s-ingress.yaml diff --git a/agent-deploy/uavcloud-devoperation/old/k8s-mongo.yaml b/agent-common/real_project/uavcloud-devoperation/old/k8s-mongo.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/old/k8s-mongo.yaml rename to agent-common/real_project/uavcloud-devoperation/old/k8s-mongo.yaml diff --git a/agent-deploy/uavcloud-devoperation/old/k8s-mysql.yaml b/agent-common/real_project/uavcloud-devoperation/old/k8s-mysql.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/old/k8s-mysql.yaml rename to agent-common/real_project/uavcloud-devoperation/old/k8s-mysql.yaml diff --git a/agent-deploy/uavcloud-devoperation/old/k8s-nacos.yaml b/agent-common/real_project/uavcloud-devoperation/old/k8s-nacos.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/old/k8s-nacos.yaml rename to agent-common/real_project/uavcloud-devoperation/old/k8s-nacos.yaml diff --git a/agent-deploy/uavcloud-devoperation/old/k8s-pvc.yaml b/agent-common/real_project/uavcloud-devoperation/old/k8s-pvc.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/old/k8s-pvc.yaml rename to agent-common/real_project/uavcloud-devoperation/old/k8s-pvc.yaml diff --git a/agent-deploy/uavcloud-devoperation/old/k8s-rabbitmq.yaml b/agent-common/real_project/uavcloud-devoperation/old/k8s-rabbitmq.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/old/k8s-rabbitmq.yaml rename to agent-common/real_project/uavcloud-devoperation/old/k8s-rabbitmq.yaml diff --git a/agent-deploy/uavcloud-devoperation/old/k8s-redis.yaml b/agent-common/real_project/uavcloud-devoperation/old/k8s-redis.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/old/k8s-redis.yaml rename to 
agent-common/real_project/uavcloud-devoperation/old/k8s-redis.yaml diff --git a/agent-deploy/uavcloud-devoperation/old/k8s-srs.yaml b/agent-common/real_project/uavcloud-devoperation/old/k8s-srs.yaml similarity index 100% rename from agent-deploy/uavcloud-devoperation/old/k8s-srs.yaml rename to agent-common/real_project/uavcloud-devoperation/old/k8s-srs.yaml diff --git a/agent-deploy/wrj/k8s-backend.yaml b/agent-common/real_project/wrj/k8s-backend.yaml similarity index 100% rename from agent-deploy/wrj/k8s-backend.yaml rename to agent-common/real_project/wrj/k8s-backend.yaml diff --git a/agent-deploy/wrj/k8s-configmap.yaml b/agent-common/real_project/wrj/k8s-configmap.yaml similarity index 100% rename from agent-deploy/wrj/k8s-configmap.yaml rename to agent-common/real_project/wrj/k8s-configmap.yaml diff --git a/agent-deploy/wrj/k8s-emqx.yaml b/agent-common/real_project/wrj/k8s-emqx.yaml similarity index 100% rename from agent-deploy/wrj/k8s-emqx.yaml rename to agent-common/real_project/wrj/k8s-emqx.yaml diff --git a/agent-deploy/wrj/k8s-frontend.yaml b/agent-common/real_project/wrj/k8s-frontend.yaml similarity index 100% rename from agent-deploy/wrj/k8s-frontend.yaml rename to agent-common/real_project/wrj/k8s-frontend.yaml diff --git a/agent-deploy/wrj/k8s-ingress.yaml b/agent-common/real_project/wrj/k8s-ingress.yaml similarity index 100% rename from agent-deploy/wrj/k8s-ingress.yaml rename to agent-common/real_project/wrj/k8s-ingress.yaml diff --git a/agent-deploy/wrj/k8s-mongo.yaml b/agent-common/real_project/wrj/k8s-mongo.yaml similarity index 100% rename from agent-deploy/wrj/k8s-mongo.yaml rename to agent-common/real_project/wrj/k8s-mongo.yaml diff --git a/agent-deploy/wrj/k8s-mysql.yaml b/agent-common/real_project/wrj/k8s-mysql.yaml similarity index 100% rename from agent-deploy/wrj/k8s-mysql.yaml rename to agent-common/real_project/wrj/k8s-mysql.yaml diff --git a/agent-deploy/wrj/k8s-nacos.yaml b/agent-common/real_project/wrj/k8s-nacos.yaml similarity index 
100% rename from agent-deploy/wrj/k8s-nacos.yaml rename to agent-common/real_project/wrj/k8s-nacos.yaml diff --git a/agent-deploy/wrj/k8s-pvc.yaml b/agent-common/real_project/wrj/k8s-pvc.yaml similarity index 100% rename from agent-deploy/wrj/k8s-pvc.yaml rename to agent-common/real_project/wrj/k8s-pvc.yaml diff --git a/agent-deploy/wrj/k8s-rabbitmq.yaml b/agent-common/real_project/wrj/k8s-rabbitmq.yaml similarity index 100% rename from agent-deploy/wrj/k8s-rabbitmq.yaml rename to agent-common/real_project/wrj/k8s-rabbitmq.yaml diff --git a/agent-deploy/wrj/k8s-redis.yaml b/agent-common/real_project/wrj/k8s-redis.yaml similarity index 100% rename from agent-deploy/wrj/k8s-redis.yaml rename to agent-common/real_project/wrj/k8s-redis.yaml diff --git a/agent-deploy/wrj/k8s-srs.yaml b/agent-common/real_project/wrj/k8s-srs.yaml similarity index 100% rename from agent-deploy/wrj/k8s-srs.yaml rename to agent-common/real_project/wrj/k8s-srs.yaml diff --git a/agent-deploy/wrj/old/k8s-backend.yaml b/agent-common/real_project/wrj/old/k8s-backend.yaml similarity index 100% rename from agent-deploy/wrj/old/k8s-backend.yaml rename to agent-common/real_project/wrj/old/k8s-backend.yaml diff --git a/agent-deploy/wrj/old/k8s-configmap.yaml b/agent-common/real_project/wrj/old/k8s-configmap.yaml similarity index 100% rename from agent-deploy/wrj/old/k8s-configmap.yaml rename to agent-common/real_project/wrj/old/k8s-configmap.yaml diff --git a/agent-deploy/wrj/old/k8s-emqx.yaml b/agent-common/real_project/wrj/old/k8s-emqx.yaml similarity index 100% rename from agent-deploy/wrj/old/k8s-emqx.yaml rename to agent-common/real_project/wrj/old/k8s-emqx.yaml diff --git a/agent-deploy/wrj/old/k8s-frontend.yaml b/agent-common/real_project/wrj/old/k8s-frontend.yaml similarity index 100% rename from agent-deploy/wrj/old/k8s-frontend.yaml rename to agent-common/real_project/wrj/old/k8s-frontend.yaml diff --git a/agent-deploy/wrj/old/k8s-ingress.yaml 
b/agent-common/real_project/wrj/old/k8s-ingress.yaml similarity index 100% rename from agent-deploy/wrj/old/k8s-ingress.yaml rename to agent-common/real_project/wrj/old/k8s-ingress.yaml diff --git a/agent-deploy/wrj/old/k8s-mongo.yaml b/agent-common/real_project/wrj/old/k8s-mongo.yaml similarity index 100% rename from agent-deploy/wrj/old/k8s-mongo.yaml rename to agent-common/real_project/wrj/old/k8s-mongo.yaml diff --git a/agent-deploy/wrj/old/k8s-mysql.yaml b/agent-common/real_project/wrj/old/k8s-mysql.yaml similarity index 100% rename from agent-deploy/wrj/old/k8s-mysql.yaml rename to agent-common/real_project/wrj/old/k8s-mysql.yaml diff --git a/agent-deploy/wrj/old/k8s-nacos.yaml b/agent-common/real_project/wrj/old/k8s-nacos.yaml similarity index 100% rename from agent-deploy/wrj/old/k8s-nacos.yaml rename to agent-common/real_project/wrj/old/k8s-nacos.yaml diff --git a/agent-deploy/wrj/old/k8s-pvc.yaml b/agent-common/real_project/wrj/old/k8s-pvc.yaml similarity index 100% rename from agent-deploy/wrj/old/k8s-pvc.yaml rename to agent-common/real_project/wrj/old/k8s-pvc.yaml diff --git a/agent-deploy/wrj/old/k8s-rabbitmq.yaml b/agent-common/real_project/wrj/old/k8s-rabbitmq.yaml similarity index 100% rename from agent-deploy/wrj/old/k8s-rabbitmq.yaml rename to agent-common/real_project/wrj/old/k8s-rabbitmq.yaml diff --git a/agent-deploy/wrj/old/k8s-redis.yaml b/agent-common/real_project/wrj/old/k8s-redis.yaml similarity index 100% rename from agent-deploy/wrj/old/k8s-redis.yaml rename to agent-common/real_project/wrj/old/k8s-redis.yaml diff --git a/agent-deploy/wrj/old/k8s-srs.yaml b/agent-common/real_project/wrj/old/k8s-srs.yaml similarity index 100% rename from agent-deploy/wrj/old/k8s-srs.yaml rename to agent-common/real_project/wrj/old/k8s-srs.yaml diff --git a/agent-operator/real_project/xmyd/CmiiConfig.go b/agent-common/real_project/xmyd/CmiiConfig.go similarity index 100% rename from agent-operator/real_project/xmyd/CmiiConfig.go rename to 
agent-common/real_project/xmyd/CmiiConfig.go diff --git a/agent-operator/real_project/xzyd/Config.go b/agent-common/real_project/xzyd/Config.go similarity index 100% rename from agent-operator/real_project/xzyd/Config.go rename to agent-common/real_project/xzyd/Config.go diff --git a/agent-deploy/z_5.5.0/k8s-backend.yaml b/agent-common/real_project/z_5.5.0/k8s-backend.yaml similarity index 100% rename from agent-deploy/z_5.5.0/k8s-backend.yaml rename to agent-common/real_project/z_5.5.0/k8s-backend.yaml diff --git a/agent-deploy/z_5.5.0/k8s-configmap.yaml b/agent-common/real_project/z_5.5.0/k8s-configmap.yaml similarity index 100% rename from agent-deploy/z_5.5.0/k8s-configmap.yaml rename to agent-common/real_project/z_5.5.0/k8s-configmap.yaml diff --git a/agent-deploy/z_5.5.0/k8s-dashboard.yaml b/agent-common/real_project/z_5.5.0/k8s-dashboard.yaml similarity index 100% rename from agent-deploy/z_5.5.0/k8s-dashboard.yaml rename to agent-common/real_project/z_5.5.0/k8s-dashboard.yaml diff --git a/agent-deploy/z_5.5.0/k8s-emqx.yaml b/agent-common/real_project/z_5.5.0/k8s-emqx.yaml similarity index 100% rename from agent-deploy/z_5.5.0/k8s-emqx.yaml rename to agent-common/real_project/z_5.5.0/k8s-emqx.yaml diff --git a/agent-deploy/z_5.5.0/k8s-frontend.yaml b/agent-common/real_project/z_5.5.0/k8s-frontend.yaml similarity index 100% rename from agent-deploy/z_5.5.0/k8s-frontend.yaml rename to agent-common/real_project/z_5.5.0/k8s-frontend.yaml diff --git a/agent-deploy/z_5.5.0/k8s-ingress.yaml b/agent-common/real_project/z_5.5.0/k8s-ingress.yaml similarity index 100% rename from agent-deploy/z_5.5.0/k8s-ingress.yaml rename to agent-common/real_project/z_5.5.0/k8s-ingress.yaml diff --git a/agent-deploy/z_5.5.0/k8s-mongo.yaml b/agent-common/real_project/z_5.5.0/k8s-mongo.yaml similarity index 100% rename from agent-deploy/z_5.5.0/k8s-mongo.yaml rename to agent-common/real_project/z_5.5.0/k8s-mongo.yaml diff --git a/agent-deploy/z_5.5.0/k8s-mysql.yaml 
b/agent-common/real_project/z_5.5.0/k8s-mysql.yaml similarity index 100% rename from agent-deploy/z_5.5.0/k8s-mysql.yaml rename to agent-common/real_project/z_5.5.0/k8s-mysql.yaml diff --git a/agent-deploy/z_5.5.0/k8s-nacos.yaml b/agent-common/real_project/z_5.5.0/k8s-nacos.yaml similarity index 100% rename from agent-deploy/z_5.5.0/k8s-nacos.yaml rename to agent-common/real_project/z_5.5.0/k8s-nacos.yaml diff --git a/agent-deploy/z_5.5.0/k8s-nfs-test.yaml b/agent-common/real_project/z_5.5.0/k8s-nfs-test.yaml similarity index 100% rename from agent-deploy/z_5.5.0/k8s-nfs-test.yaml rename to agent-common/real_project/z_5.5.0/k8s-nfs-test.yaml diff --git a/agent-deploy/z_5.5.0/k8s-nfs.yaml b/agent-common/real_project/z_5.5.0/k8s-nfs.yaml similarity index 100% rename from agent-deploy/z_5.5.0/k8s-nfs.yaml rename to agent-common/real_project/z_5.5.0/k8s-nfs.yaml diff --git a/agent-deploy/z_5.5.0/k8s-pvc.yaml b/agent-common/real_project/z_5.5.0/k8s-pvc.yaml similarity index 100% rename from agent-deploy/z_5.5.0/k8s-pvc.yaml rename to agent-common/real_project/z_5.5.0/k8s-pvc.yaml diff --git a/agent-deploy/z_5.5.0/k8s-rabbitmq.yaml b/agent-common/real_project/z_5.5.0/k8s-rabbitmq.yaml similarity index 100% rename from agent-deploy/z_5.5.0/k8s-rabbitmq.yaml rename to agent-common/real_project/z_5.5.0/k8s-rabbitmq.yaml diff --git a/agent-deploy/z_5.5.0/k8s-redis.yaml b/agent-common/real_project/z_5.5.0/k8s-redis.yaml similarity index 100% rename from agent-deploy/z_5.5.0/k8s-redis.yaml rename to agent-common/real_project/z_5.5.0/k8s-redis.yaml diff --git a/agent-deploy/z_5.5.0/k8s-srs.yaml b/agent-common/real_project/z_5.5.0/k8s-srs.yaml similarity index 100% rename from agent-deploy/z_5.5.0/k8s-srs.yaml rename to agent-common/real_project/z_5.5.0/k8s-srs.yaml diff --git a/agent-deploy/z_bjtg/k8s-backend.yaml b/agent-common/real_project/z_bjtg/k8s-backend.yaml similarity index 100% rename from agent-deploy/z_bjtg/k8s-backend.yaml rename to 
agent-common/real_project/z_bjtg/k8s-backend.yaml diff --git a/agent-deploy/z_bjtg/k8s-configmap.yaml b/agent-common/real_project/z_bjtg/k8s-configmap.yaml similarity index 100% rename from agent-deploy/z_bjtg/k8s-configmap.yaml rename to agent-common/real_project/z_bjtg/k8s-configmap.yaml diff --git a/agent-deploy/z_bjtg/k8s-frontend.yaml b/agent-common/real_project/z_bjtg/k8s-frontend.yaml similarity index 100% rename from agent-deploy/z_bjtg/k8s-frontend.yaml rename to agent-common/real_project/z_bjtg/k8s-frontend.yaml diff --git a/agent-deploy/z_file/k8s-backend.yaml b/agent-common/real_project/z_file/k8s-backend.yaml similarity index 100% rename from agent-deploy/z_file/k8s-backend.yaml rename to agent-common/real_project/z_file/k8s-backend.yaml diff --git a/agent-deploy/z_file/k8s-configmap.yaml b/agent-common/real_project/z_file/k8s-configmap.yaml similarity index 100% rename from agent-deploy/z_file/k8s-configmap.yaml rename to agent-common/real_project/z_file/k8s-configmap.yaml diff --git a/agent-deploy/z_file/k8s-dashboard.yaml b/agent-common/real_project/z_file/k8s-dashboard.yaml similarity index 100% rename from agent-deploy/z_file/k8s-dashboard.yaml rename to agent-common/real_project/z_file/k8s-dashboard.yaml diff --git a/agent-deploy/z_file/k8s-emqx.yaml b/agent-common/real_project/z_file/k8s-emqx.yaml similarity index 100% rename from agent-deploy/z_file/k8s-emqx.yaml rename to agent-common/real_project/z_file/k8s-emqx.yaml diff --git a/agent-deploy/z_file/k8s-frontend.yaml b/agent-common/real_project/z_file/k8s-frontend.yaml similarity index 100% rename from agent-deploy/z_file/k8s-frontend.yaml rename to agent-common/real_project/z_file/k8s-frontend.yaml diff --git a/agent-deploy/z_file/k8s-ingress.yaml b/agent-common/real_project/z_file/k8s-ingress.yaml similarity index 100% rename from agent-deploy/z_file/k8s-ingress.yaml rename to agent-common/real_project/z_file/k8s-ingress.yaml diff --git a/agent-deploy/z_file/k8s-mongo.yaml 
b/agent-common/real_project/z_file/k8s-mongo.yaml similarity index 100% rename from agent-deploy/z_file/k8s-mongo.yaml rename to agent-common/real_project/z_file/k8s-mongo.yaml diff --git a/agent-deploy/z_file/k8s-mysql.yaml b/agent-common/real_project/z_file/k8s-mysql.yaml similarity index 100% rename from agent-deploy/z_file/k8s-mysql.yaml rename to agent-common/real_project/z_file/k8s-mysql.yaml diff --git a/agent-deploy/z_file/k8s-nacos.yaml b/agent-common/real_project/z_file/k8s-nacos.yaml similarity index 100% rename from agent-deploy/z_file/k8s-nacos.yaml rename to agent-common/real_project/z_file/k8s-nacos.yaml diff --git a/agent-deploy/z_file/k8s-nfs-test.yaml b/agent-common/real_project/z_file/k8s-nfs-test.yaml similarity index 100% rename from agent-deploy/z_file/k8s-nfs-test.yaml rename to agent-common/real_project/z_file/k8s-nfs-test.yaml diff --git a/agent-deploy/z_file/k8s-nfs.yaml b/agent-common/real_project/z_file/k8s-nfs.yaml similarity index 100% rename from agent-deploy/z_file/k8s-nfs.yaml rename to agent-common/real_project/z_file/k8s-nfs.yaml diff --git a/agent-deploy/z_file/k8s-pvc.yaml b/agent-common/real_project/z_file/k8s-pvc.yaml similarity index 100% rename from agent-deploy/z_file/k8s-pvc.yaml rename to agent-common/real_project/z_file/k8s-pvc.yaml diff --git a/agent-deploy/z_file/k8s-rabbitmq.yaml b/agent-common/real_project/z_file/k8s-rabbitmq.yaml similarity index 100% rename from agent-deploy/z_file/k8s-rabbitmq.yaml rename to agent-common/real_project/z_file/k8s-rabbitmq.yaml diff --git a/agent-deploy/z_file/k8s-redis.yaml b/agent-common/real_project/z_file/k8s-redis.yaml similarity index 100% rename from agent-deploy/z_file/k8s-redis.yaml rename to agent-common/real_project/z_file/k8s-redis.yaml diff --git a/agent-deploy/z_file/k8s-srs.yaml b/agent-common/real_project/z_file/k8s-srs.yaml similarity index 100% rename from agent-deploy/z_file/k8s-srs.yaml rename to agent-common/real_project/z_file/k8s-srs.yaml diff --git 
a/agent-deploy/z_xjyd/k8s-backend.yaml b/agent-common/real_project/z_xjyd/k8s-backend.yaml similarity index 100% rename from agent-deploy/z_xjyd/k8s-backend.yaml rename to agent-common/real_project/z_xjyd/k8s-backend.yaml diff --git a/agent-deploy/z_xjyd/k8s-configmap.yaml b/agent-common/real_project/z_xjyd/k8s-configmap.yaml similarity index 100% rename from agent-deploy/z_xjyd/k8s-configmap.yaml rename to agent-common/real_project/z_xjyd/k8s-configmap.yaml diff --git a/agent-deploy/z_xjyd/k8s-dashboard.yaml b/agent-common/real_project/z_xjyd/k8s-dashboard.yaml similarity index 100% rename from agent-deploy/z_xjyd/k8s-dashboard.yaml rename to agent-common/real_project/z_xjyd/k8s-dashboard.yaml diff --git a/agent-deploy/z_xjyd/k8s-emqx.yaml b/agent-common/real_project/z_xjyd/k8s-emqx.yaml similarity index 100% rename from agent-deploy/z_xjyd/k8s-emqx.yaml rename to agent-common/real_project/z_xjyd/k8s-emqx.yaml diff --git a/agent-deploy/z_xjyd/k8s-frontend.yaml b/agent-common/real_project/z_xjyd/k8s-frontend.yaml similarity index 100% rename from agent-deploy/z_xjyd/k8s-frontend.yaml rename to agent-common/real_project/z_xjyd/k8s-frontend.yaml diff --git a/agent-deploy/z_xjyd/k8s-ingress.yaml b/agent-common/real_project/z_xjyd/k8s-ingress.yaml similarity index 100% rename from agent-deploy/z_xjyd/k8s-ingress.yaml rename to agent-common/real_project/z_xjyd/k8s-ingress.yaml diff --git a/agent-deploy/z_xjyd/k8s-mongo.yaml b/agent-common/real_project/z_xjyd/k8s-mongo.yaml similarity index 100% rename from agent-deploy/z_xjyd/k8s-mongo.yaml rename to agent-common/real_project/z_xjyd/k8s-mongo.yaml diff --git a/agent-deploy/z_xjyd/k8s-mysql.yaml b/agent-common/real_project/z_xjyd/k8s-mysql.yaml similarity index 100% rename from agent-deploy/z_xjyd/k8s-mysql.yaml rename to agent-common/real_project/z_xjyd/k8s-mysql.yaml diff --git a/agent-deploy/z_xjyd/k8s-nacos.yaml b/agent-common/real_project/z_xjyd/k8s-nacos.yaml similarity index 100% rename from 
agent-deploy/z_xjyd/k8s-nacos.yaml rename to agent-common/real_project/z_xjyd/k8s-nacos.yaml diff --git a/agent-deploy/z_xjyd/k8s-nfs-test.yaml b/agent-common/real_project/z_xjyd/k8s-nfs-test.yaml similarity index 100% rename from agent-deploy/z_xjyd/k8s-nfs-test.yaml rename to agent-common/real_project/z_xjyd/k8s-nfs-test.yaml diff --git a/agent-deploy/z_xjyd/k8s-nfs.yaml b/agent-common/real_project/z_xjyd/k8s-nfs.yaml similarity index 100% rename from agent-deploy/z_xjyd/k8s-nfs.yaml rename to agent-common/real_project/z_xjyd/k8s-nfs.yaml diff --git a/agent-deploy/z_xjyd/k8s-pvc.yaml b/agent-common/real_project/z_xjyd/k8s-pvc.yaml similarity index 100% rename from agent-deploy/z_xjyd/k8s-pvc.yaml rename to agent-common/real_project/z_xjyd/k8s-pvc.yaml diff --git a/agent-deploy/z_xjyd/k8s-rabbitmq.yaml b/agent-common/real_project/z_xjyd/k8s-rabbitmq.yaml similarity index 100% rename from agent-deploy/z_xjyd/k8s-rabbitmq.yaml rename to agent-common/real_project/z_xjyd/k8s-rabbitmq.yaml diff --git a/agent-deploy/z_xjyd/k8s-redis.yaml b/agent-common/real_project/z_xjyd/k8s-redis.yaml similarity index 100% rename from agent-deploy/z_xjyd/k8s-redis.yaml rename to agent-common/real_project/z_xjyd/k8s-redis.yaml diff --git a/agent-deploy/z_xjyd/k8s-srs.yaml b/agent-common/real_project/z_xjyd/k8s-srs.yaml similarity index 100% rename from agent-deploy/z_xjyd/k8s-srs.yaml rename to agent-common/real_project/z_xjyd/k8s-srs.yaml diff --git a/agent-common/real_project/zhejianyidong_erjipingtai/Config.go b/agent-common/real_project/zhejianyidong_erjipingtai/Config.go new file mode 100644 index 0000000..5980e48 --- /dev/null +++ b/agent-common/real_project/zhejianyidong_erjipingtai/Config.go @@ -0,0 +1,70 @@ +package zhejianyidong_erjipingtai + +var Cmii570ImageList = []string{ + "harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:5.6.0-30403-071802", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:5.6.0", + 
"harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:4.2.0-beta", + "harbor.cdcyy.com.cn/cmii/cmii-uav-notice:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-logger:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:5.5.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392", + "harbor.cdcyy.com.cn/cmii/cmii-uav-device:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-integration:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-cms:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-iam-gateway:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-admin-user:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-brain:5.5.0", + "harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-admin-data:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:5.7.0", + 
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-dispatchh5:5.6.0-0708", + "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:5.1.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-process:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:5.7.0-29668-071901", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-app-release:4.2.0-validation", + "harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:5.1.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-open-gateway:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:5.7.0-071815", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:5.5.0", + "harbor.cdcyy.com.cn/cmii/ossrs/srs:v5.0.195", + "harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-developer:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-tower:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:5.2.0-24810", + "harbor.cdcyy.com.cn/cmii/cmii-uav-user:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-mission:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA", +} diff --git a/agent-operator/real_project/zjjt/Config.go b/agent-common/real_project/zjjt/Config.go similarity index 100% rename from agent-operator/real_project/zjjt/Config.go rename to agent-common/real_project/zjjt/Config.go diff --git 
a/agent-operator/real_project/zjjt/RealImageConfig.go b/agent-common/real_project/zjjt/RealImageConfig.go similarity index 100% rename from agent-operator/real_project/zjjt/RealImageConfig.go rename to agent-common/real_project/zjjt/RealImageConfig.go diff --git a/agent-common/real_project/zjyd/k8s-backend.yaml b/agent-common/real_project/zjyd/k8s-backend.yaml new file mode 100644 index 0000000..5a39ed6 --- /dev/null +++ b/agent-common/real_project/zjyd/k8s-backend.yaml @@ -0,0 +1,5699 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-brain + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-brain + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-brain + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-brain + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-brain + image: 192.168.10.3:8033/cmii/cmii-uav-brain:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-brain + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + 
limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-brain + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-brain + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-brain + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-brain + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-process + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-process + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-process + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + 
imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-process + image: 192.168.10.3:8033/cmii/cmii-uav-process:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-process + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-process + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-process + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + 
selector: + cmii.type: backend + cmii.app: cmii-uav-process + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-surveillance + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-surveillance + image: 192.168.10.3:8033/cmii/cmii-uav-surveillance:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-surveillance + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: 
/cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-surveillance + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-surveillance + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-surveillance + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-integration + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-integration + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-integration + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-integration + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-integration + image: 192.168.10.3:8033/cmii/cmii-uav-integration:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: 
cmii-uav-integration + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-integration + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-integration + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-integration + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-integration + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-depotautoreturn + 
namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-depotautoreturn + image: 192.168.10.3:8033/cmii/cmii-uav-depotautoreturn:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-depotautoreturn + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: 
pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-depotautoreturn + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-depotautoreturn + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-tower + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-tower + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-tower + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-tower + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-tower + image: 192.168.10.3:8033/cmii/cmii-uav-tower:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-tower + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: 
NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-tower + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-tower + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-tower + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-tower + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-notice + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-notice + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + 
matchLabels: + cmii.type: backend + cmii.app: cmii-uav-notice + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-notice + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-notice + image: 192.168.10.3:8033/cmii/cmii-uav-notice:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-notice + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-notice + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + 
claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-notice + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-notice + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-notice + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-cloud-live + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-cloud-live + image: 192.168.10.3:8033/cmii/cmii-uav-cloud-live:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-cloud-live + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + 
limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-cloud-live + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-cloud-live + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-grid-datasource + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-datasource + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-grid-datasource + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-datasource + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: 
uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-grid-datasource + image: 192.168.10.3:8033/cmii/cmii-uav-grid-datasource:5.2.0-24810 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-grid-datasource + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-grid-datasource + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-grid-datasource + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-datasource + octopus/control: backend-app-1.0.0 + 
app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-grid-datasource + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-device + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-device + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-device + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-device + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-device + image: 192.168.10.3:8033/cmii/cmii-uav-device:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-device + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + 
successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-device + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-device + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-device + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-device + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-waypoint + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-waypoint + image: 192.168.10.3:8033/cmii/cmii-uav-waypoint:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: 
APPLICATION_NAME + value: cmii-uav-waypoint + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-waypoint + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-waypoint + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-waypoint + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-cms + 
namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-cms + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-cms + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-cms + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-cms + image: 192.168.10.3:8033/cmii/cmii-uav-cms:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-cms + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + 
periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-cms + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-cms + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-cms + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-cms + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-sense-adapter + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-sense-adapter + image: 192.168.10.3:8033/cmii/cmii-uav-sense-adapter:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-sense-adapter + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + 
value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-sense-adapter + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-sense-adapter + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-admin-data + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-admin-data + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: 
cmii-admin-data + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-admin-data + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-admin-data + image: 192.168.10.3:8033/cmii/cmii-admin-data:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-admin-data + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-admin-data + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: 
v1 +kind: Service +metadata: + name: cmii-admin-data + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-admin-data + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-admin-data + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uas-lifecycle + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uas-lifecycle + image: 192.168.10.3:8033/cmii/cmii-uas-lifecycle:5.6.0-30403-071802 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uas-lifecycle + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + 
requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uas-lifecycle + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uas-lifecycle + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-data-post-process + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + 
- zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-data-post-process + image: 192.168.10.3:8033/cmii/cmii-uav-data-post-process:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-data-post-process + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-data-post-process + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-data-post-process + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + 
app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uas-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uas-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uas-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uas-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uas-gateway + image: 192.168.10.3:8033/cmii/cmii-uas-gateway:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uas-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + 
readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uas-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uas-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uas-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uas-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-industrial-portfolio + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-industrial-portfolio + image: 192.168.10.3:8033/cmii/cmii-uav-industrial-portfolio:5.7.0 + imagePullPolicy: Always + env: + - name: 
K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-industrial-portfolio + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-industrial-portfolio + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-industrial-portfolio + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + 
targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-alarm + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-alarm + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-alarm + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-alarm + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-alarm + image: 192.168.10.3:8033/cmii/cmii-uav-alarm:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-alarm + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + 
httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-alarm + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-alarm + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-alarm + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-alarm + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-suav-supervision + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-suav-supervision + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-suav-supervision + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-suav-supervision + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-suav-supervision + image: 192.168.10.3:8033/cmii/cmii-suav-supervision:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-suav-supervision + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + 
fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-suav-supervision + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-suav-supervision + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-suav-supervision + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-suav-supervision + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-developer + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-developer + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + 
replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-developer + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-developer + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-developer + image: 192.168.10.3:8033/cmii/cmii-uav-developer:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-developer + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: 
zjyd/cmii-uav-developer + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-developer + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-developer + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-developer + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-gateway + image: 192.168.10.3:8033/cmii/cmii-uav-gateway:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: 
"Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-admin-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-admin-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-admin-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-admin-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: 
+ nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-admin-gateway + image: 192.168.10.3:8033/cmii/cmii-admin-gateway:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-admin-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-admin-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-admin-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-admin-gateway + octopus/control: backend-app-1.0.0 
+ app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-admin-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-material-warehouse + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-material-warehouse + image: 192.168.10.3:8033/cmii/cmii-uav-material-warehouse:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-material-warehouse + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + 
initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-material-warehouse + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-material-warehouse + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-gis-server + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-gis-server + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-gis-server + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-gis-server + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-gis-server + image: 
192.168.10.3:8033/cmii/cmii-uav-gis-server:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-gis-server + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-gis-server + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-gis-server + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-gis-server + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-gis-server + ports: + - name: 
backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-grid-manage + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-grid-manage + image: 192.168.10.3:8033/cmii/cmii-uav-grid-manage:5.1.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-grid-manage + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 
+ periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-grid-manage + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-grid-manage + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-open-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-open-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-open-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-open-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-open-gateway + image: 192.168.10.3:8033/cmii/cmii-open-gateway:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-open-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: 
NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-open-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-open-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-open-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-open-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-autowaypoint + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + octopus/control: backend-app-1.0.0 + 
app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-autowaypoint + image: 192.168.10.3:8033/cmii/cmii-uav-autowaypoint:4.2.0-beta + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-autowaypoint + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + 
volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-autowaypoint + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-autowaypoint + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-airspace + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-airspace + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-airspace + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-airspace + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-airspace + image: 192.168.10.3:8033/cmii/cmii-uav-airspace:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-airspace + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: 
IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-airspace + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-airspace + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-airspace + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-airspace + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-multilink + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-multilink + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-multilink + template: + metadata: + labels: + cmii.type: backend + 
cmii.app: cmii-uav-multilink + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-multilink + image: 192.168.10.3:8033/cmii/cmii-uav-multilink:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-multilink + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-multilink + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-multilink + 
namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-multilink + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-multilink + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-emergency + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-emergency + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-emergency + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-emergency + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-emergency + image: 192.168.10.3:8033/cmii/cmii-uav-emergency:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-emergency + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + 
path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-emergency + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-emergency + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-emergency + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-emergency + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-iam-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-iam-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-iam-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-iam-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-iam-gateway + image: 
192.168.10.3:8033/cmii/cmii-iam-gateway:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-iam-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-iam-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-iam-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-iam-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-iam-gateway + ports: + - name: backend-tcp + port: 
8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-grid-engine + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-grid-engine + image: 192.168.10.3:8033/cmii/cmii-uav-grid-engine:5.1.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-grid-engine + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 
+ successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-grid-engine + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-grid-engine + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-mission + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-mission + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-mission + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-mission + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-mission + image: 192.168.10.3:8033/cmii/cmii-uav-mission:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-mission + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: 
"helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-mission + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-mission + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-mission + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-mission + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-app-release + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-app-release + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + 
app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-app-release + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-app-release + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-app-release + image: 192.168.10.3:8033/cmii/cmii-app-release:4.2.0-validation + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-app-release + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + 
mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-app-release + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-app-release + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-app-release + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-app-release + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-user + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-user + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-user + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-user + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-user + image: 192.168.10.3:8033/cmii/cmii-uav-user:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-user + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD 
+ value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-user + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-user + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-user + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-user + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-threedsimulation + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + spec: + affinity: + nodeAffinity: + 
requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-threedsimulation + image: 192.168.10.3:8033/cmii/cmii-uav-threedsimulation:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-threedsimulation + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-threedsimulation + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-threedsimulation + namespace: zjyd + labels: + 
cmii.type: backend + cmii.app: cmii-uav-threedsimulation + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-oauth + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-oauth + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-oauth + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-oauth + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-oauth + image: 192.168.10.3:8033/cmii/cmii-uav-oauth:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-oauth + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + 
scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-oauth + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-oauth + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-oauth + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-oauth + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-logger + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-logger + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-logger + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-logger + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-logger + image: 192.168.10.3:8033/cmii/cmii-uav-logger:5.7.0 + imagePullPolicy: 
Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-logger + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-logger + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-logger + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-logger + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-logger + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 
+kind: Deployment +metadata: + name: cmii-uav-mqtthandler + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-mqtthandler + image: 192.168.10.3:8033/cmii/cmii-uav-mqtthandler:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-mqtthandler + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + 
httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-mqtthandler + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-mqtthandler + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-admin-user + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-admin-user + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-admin-user + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-admin-user + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - zjyd + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-admin-user + image: 192.168.10.3:8033/cmii/cmii-admin-user:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-admin-user + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: 
status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-admin-user + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-admin-user + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-admin-user + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-admin-user + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 diff --git a/agent-operator/deploy/z_bjtg/k8s-configmap.yaml b/agent-common/real_project/zjyd/k8s-configmap.yaml old mode 100755 new mode 100644 similarity index 65% rename from agent-operator/deploy/z_bjtg/k8s-configmap.yaml rename to agent-common/real_project/zjyd/k8s-configmap.yaml index 43eba92..5679b80 --- 
a/agent-operator/deploy/z_bjtg/k8s-configmap.yaml +++ b/agent-common/real_project/zjyd/k8s-configmap.yaml @@ -1,224 +1,28 @@ --- kind: ConfigMap apiVersion: v1 -metadata: - name: tenant-prefix-cmsportal - namespace: bjtg -data: - ingress-config.js: |- - var __GlobalIngressConfig = { - TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "cmsportal", - AppClientId: "empty" - } ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: tenant-prefix-detection - namespace: bjtg -data: - ingress-config.js: |- - var __GlobalIngressConfig = { - TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "detection", - AppClientId: "APP_FDHW2VLVDWPnnOCy" - } ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: tenant-prefix-emergency - namespace: bjtg -data: - ingress-config.js: |- - var __GlobalIngressConfig = { - TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "emergency", - AppClientId: "APP_aGsTAY1uMZrpKdfk" - } ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: tenant-prefix-multiterminal - namespace: bjtg -data: - ingress-config.js: |- - var __GlobalIngressConfig = { - TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "multiterminal", - AppClientId: "APP_PvdfRRRBPL8xbIwl" - } ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: tenant-prefix-oms - namespace: bjtg -data: - ingress-config.js: |- - var __GlobalIngressConfig = { - TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "oms", - AppClientId: "empty" - } ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: tenant-prefix-traffic - namespace: bjtg -data: - ingress-config.js: |- - var __GlobalIngressConfig = { - TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "traffic", - AppClientId: "APP_Jc8i2wOQ1t73QEJS" - } ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: tenant-prefix-jiangsuwenlv - namespace: bjtg -data: - 
ingress-config.js: |- - var __GlobalIngressConfig = { - TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "jiangsuwenlv", - AppClientId: "empty" - } ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: tenant-prefix-ai-brain - namespace: bjtg -data: - ingress-config.js: |- - var __GlobalIngressConfig = { - TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "ai-brain", - AppClientId: "APP_rafnuCAmBESIVYMH" - } ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: tenant-prefix-armypeople - namespace: bjtg -data: - ingress-config.js: |- - var __GlobalIngressConfig = { - TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "armypeople", - AppClientId: "APP_UIegse6Lfou9pO1U" - } ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: tenant-prefix-media - namespace: bjtg -data: - ingress-config.js: |- - var __GlobalIngressConfig = { - TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "media", - AppClientId: "APP_4AU8lbifESQO4FD6" - } ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: tenant-prefix-mws - namespace: bjtg -data: - ingress-config.js: |- - var __GlobalIngressConfig = { - TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "mws", - AppClientId: "APP_uKniXPELlRERBBwK" - } ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: tenant-prefix-open - namespace: bjtg -data: - ingress-config.js: |- - var __GlobalIngressConfig = { - TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "open", - AppClientId: "empty" - } ---- -kind: ConfigMap -apiVersion: v1 metadata: name: tenant-prefix-qinghaitourism - namespace: bjtg + namespace: zjyd data: ingress-config.js: |- var __GlobalIngressConfig = { TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", + CloudHOST: "111.2.224.59:8088", ApplicationShortName: "qinghaitourism", AppClientId: "empty" } --- kind: ConfigMap apiVersion: 
v1 -metadata: - name: tenant-prefix-platform - namespace: bjtg -data: - ingress-config.js: |- - var __GlobalIngressConfig = { - TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "platform", - AppClientId: "empty" - } ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: tenant-prefix-splice - namespace: bjtg -data: - ingress-config.js: |- - var __GlobalIngressConfig = { - TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "splice", - AppClientId: "APP_zE0M3sTRXrCIJS8Y" - } ---- -kind: ConfigMap -apiVersion: v1 metadata: name: tenant-prefix-logistics - namespace: bjtg + namespace: zjyd data: ingress-config.js: |- var __GlobalIngressConfig = { TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", + CloudHOST: "111.2.224.59:8088", ApplicationShortName: "logistics", AppClientId: "APP_PvdfRRRBPL8xbIwl" } @@ -226,14 +30,70 @@ data: kind: ConfigMap apiVersion: v1 metadata: - name: tenant-prefix-supervisionh5 - namespace: bjtg + name: tenant-prefix-mws + namespace: zjyd data: ingress-config.js: |- var __GlobalIngressConfig = { TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "supervisionh5", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "mws", + AppClientId: "APP_uKniXPELlRERBBwK" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-media + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "media", + AppClientId: "APP_4AU8lbifESQO4FD6" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-share + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "share", + AppClientId: "APP_4lVSVI0ZGxTssir8" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-qingdao + namespace: zjyd +data: + 
ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "qingdao", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-supervision + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "supervision", AppClientId: "APP_qqSu82THfexI8PLM" } --- @@ -241,12 +101,12 @@ kind: ConfigMap apiVersion: v1 metadata: name: tenant-prefix-base - namespace: bjtg + namespace: zjyd data: ingress-config.js: |- var __GlobalIngressConfig = { TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", + CloudHOST: "111.2.224.59:8088", ApplicationShortName: "base", AppClientId: "APP_9LY41OaKSqk2btY0" } @@ -254,13 +114,237 @@ data: kind: ConfigMap apiVersion: v1 metadata: - name: tenant-prefix-security - namespace: bjtg + name: tenant-prefix-hljtt + namespace: zjyd data: ingress-config.js: |- var __GlobalIngressConfig = { TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "hljtt", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-emergency + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "emergency", + AppClientId: "APP_aGsTAY1uMZrpKdfk" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-seniclive + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "seniclive", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-oms + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + 
ApplicationShortName: "oms", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-jiangsuwenlv + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "jiangsuwenlv", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-open + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "open", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-splice + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "splice", + AppClientId: "APP_zE0M3sTRXrCIJS8Y" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-uasms + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "uasms", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-uas + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "uas", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-ai-brain + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "ai-brain", + AppClientId: "APP_rafnuCAmBESIVYMH" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-multiterminal + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + 
ApplicationShortName: "multiterminal", + AppClientId: "APP_PvdfRRRBPL8xbIwl" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-traffic + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "traffic", + AppClientId: "APP_Jc8i2wOQ1t73QEJS" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-threedsimulation + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "threedsimulation", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-supervisionh5 + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "supervisionh5", + AppClientId: "APP_qqSu82THfexI8PLM" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-armypeople + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "armypeople", + AppClientId: "APP_UIegse6Lfou9pO1U" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-detection + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "detection", + AppClientId: "APP_FDHW2VLVDWPnnOCy" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-security + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", ApplicationShortName: "security", AppClientId: "APP_JUSEMc7afyWXxvE7" } @@ -269,12 +353,12 @@ kind: ConfigMap apiVersion: v1 metadata: name: tenant-prefix-securityh5 - namespace: bjtg + 
namespace: zjyd data: ingress-config.js: |- var __GlobalIngressConfig = { TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", + CloudHOST: "111.2.224.59:8088", ApplicationShortName: "securityh5", AppClientId: "APP_N3ImO0Ubfu9peRHD" } @@ -282,55 +366,55 @@ data: kind: ConfigMap apiVersion: v1 metadata: - name: tenant-prefix-seniclive - namespace: bjtg + name: tenant-prefix-visualization + namespace: zjyd data: ingress-config.js: |- var __GlobalIngressConfig = { TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "seniclive", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "visualization", AppClientId: "empty" } --- kind: ConfigMap apiVersion: v1 metadata: - name: tenant-prefix-share - namespace: bjtg + name: tenant-prefix-dispatchh5 + namespace: zjyd data: ingress-config.js: |- var __GlobalIngressConfig = { TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "share", - AppClientId: "APP_4lVSVI0ZGxTssir8" - } ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: tenant-prefix-threedsimulation - namespace: bjtg -data: - ingress-config.js: |- - var __GlobalIngressConfig = { - TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "threedsimulation", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "dispatchh5", AppClientId: "empty" } --- kind: ConfigMap apiVersion: v1 metadata: - name: tenant-prefix-supervision - namespace: bjtg + name: tenant-prefix-pangu + namespace: zjyd data: ingress-config.js: |- var __GlobalIngressConfig = { TenantEnvironment: "", - CloudHOST: "10.250.0.110:8888", - ApplicationShortName: "supervision", - AppClientId: "APP_qqSu82THfexI8PLM" + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-cmsportal + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: 
"111.2.224.59:8088", + ApplicationShortName: "cmsportal", + AppClientId: "empty" } diff --git a/agent-common/real_project/zjyd/k8s-dashboard.yaml b/agent-common/real_project/zjyd/k8s-dashboard.yaml new file mode 100644 index 0000000..be1e398 --- /dev/null +++ b/agent-common/real_project/zjyd/k8s-dashboard.yaml @@ -0,0 +1,309 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + kubernetes.io/cluster-service: "true" + name: kubernetes-dashboard + namespace: kube-system +spec: + ports: + - port: 443 + targetPort: 8443 + nodePort: 30554 + selector: + k8s-app: kubernetes-dashboard + type: NodePort + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: kube-system +type: Opaque + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-csrf + namespace: kube-system +type: Opaque +data: + csrf: "" + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-key-holder + namespace: kube-system +type: Opaque + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-settings + namespace: kube-system + +--- + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +rules: + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. 
+ - apiGroups: [ "" ] + resources: [ "secrets" ] + resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf" ] + verbs: [ "get", "update", "delete" ] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. + - apiGroups: [ "" ] + resources: [ "configmaps" ] + resourceNames: [ "kubernetes-dashboard-settings" ] + verbs: [ "get", "update" ] + # Allow Dashboard to get metrics. + - apiGroups: [ "" ] + resources: [ "services" ] + resourceNames: [ "heapster", "dashboard-metrics-scraper" ] + verbs: [ "proxy" ] + - apiGroups: [ "" ] + resources: [ "services/proxy" ] + resourceNames: [ "heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper" ] + verbs: [ "get" ] + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: [ "metrics.k8s.io" ] + resources: [ "pods", "nodes" ] + verbs: [ "get", "list", "watch" ] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: 
+ matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: kubernetes-dashboard + image: 192.168.10.3:8033/cmii/dashboard:v2.0.1 + ports: + - containerPort: 8443 + protocol: TCP + args: + - --auto-generate-certificates + - --namespace=kube-system + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. + # - --apiserver-host=http://my-address:port + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: kubernetes-dashboard-certs + - name: tmp-volume + emptyDir: { } + serviceAccountName: kubernetes-dashboard + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kube-system +spec: + ports: + - port: 8000 + targetPort: 8000 + selector: + k8s-app: dashboard-metrics-scraper + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: dashboard-metrics-scraper + template: + metadata: + labels: + k8s-app: dashboard-metrics-scraper + annotations: + 
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + spec: + containers: + - name: dashboard-metrics-scraper + image: 192.168.10.3:8033/cmii/metrics-scraper:v1.0.4 + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTP + path: / + port: 8000 + initialDelaySeconds: 30 + timeoutSeconds: 30 + volumeMounts: + - mountPath: /tmp + name: tmp-volume + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + serviceAccountName: kubernetes-dashboard + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + volumes: + - name: tmp-volume + emptyDir: { } +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: admin-user + namespace: kube-system + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: admin-user +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: admin-user + namespace: kube-system diff --git a/agent-common/real_project/zjyd/k8s-emqx.yaml b/agent-common/real_project/zjyd/k8s-emqx.yaml new file mode 100644 index 0000000..8357512 --- /dev/null +++ b/agent-common/real_project/zjyd/k8s-emqx.yaml @@ -0,0 +1,274 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-emqxs + namespace: zjyd +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-emqxs-env + namespace: zjyd + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 +data: + EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443" + EMQX_NAME: "helm-emqxs" + EMQX_CLUSTER__DISCOVERY: "k8s" + EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs" + EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless" + 
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns" + EMQX_CLUSTER__K8S__namespace: "zjyd" + EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local" + EMQX_ALLOW_ANONYMOUS: "false" + EMQX_ACL_NOMATCH: "deny" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-emqxs-cm + namespace: zjyd + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 +data: + emqx_auth_mnesia.conf: |- + auth.mnesia.password_hash = sha256 + + # clientid 认证数据 + # auth.client.1.clientid = admin + # auth.client.1.password = 4YPk*DS%+5 + + ## username 认证数据 + auth.user.1.username = admin + auth.user.1.password = odD8#Ve7.B + auth.user.2.username = cmlc + auth.user.2.password = odD8#Ve7.B + + acl.conf: |- + {allow, {user, "admin"}, pubsub, ["admin/#"]}. + {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}. + {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}. + {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}. + {allow, all}. + + loaded_plugins: |- + {emqx_auth_mnesia,true}. + {emqx_auth_mnesia,true}. + {emqx_management, true}. + {emqx_recon, true}. + {emqx_retainer, false}. + {emqx_dashboard, true}. + {emqx_telemetry, true}. + {emqx_rule_engine, true}. + {emqx_bridge_mqtt, false}. 
+--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-emqxs + namespace: zjyd + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 +spec: + replicas: 1 + serviceName: helm-emqxs-headless + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + template: + metadata: + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 + spec: + affinity: { } + imagePullSecrets: + - name: harborsecret + serviceAccountName: helm-emqxs + containers: + - name: helm-emqxs + image: 192.168.10.3:8033/cmii/emqx:4.4.9 + imagePullPolicy: Always + ports: + - name: mqtt + containerPort: 1883 + - name: mqttssl + containerPort: 8883 + - name: mgmt + containerPort: 8081 + - name: ws + containerPort: 8083 + - name: wss + containerPort: 8084 + - name: dashboard + containerPort: 18083 + - name: ekka + containerPort: 4370 + envFrom: + - configMapRef: + name: helm-emqxs-env + resources: { } + volumeMounts: + - name: emqx-data + mountPath: "/opt/emqx/data/mnesia" + readOnly: false + - name: helm-emqxs-cm + mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf" + subPath: emqx_auth_mnesia.conf + readOnly: false + # - name: helm-emqxs-cm + # mountPath: "/opt/emqx/etc/acl.conf" + # subPath: "acl.conf" + # readOnly: false + - name: helm-emqxs-cm + mountPath: "/opt/emqx/data/loaded_plugins" + subPath: loaded_plugins + readOnly: false + volumes: + - name: emqx-data + persistentVolumeClaim: + claimName: helm-emqxs + - name: helm-emqxs-cm + configMap: + name: helm-emqxs-cm + items: + - key: emqx_auth_mnesia.conf + path: emqx_auth_mnesia.conf + - key: acl.conf + path: acl.conf + - key: loaded_plugins + path: 
loaded_plugins +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-emqxs + namespace: zjyd +rules: + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - watch + - list +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-emqxs + namespace: zjyd +subjects: + - kind: ServiceAccount + name: helm-emqxs + namespace: zjyd +roleRef: + kind: Role + name: helm-emqxs + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-emqxs + namespace: zjyd + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 +spec: + type: NodePort + selector: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + ports: + - port: 1883 + name: mqtt + targetPort: 1883 + nodePort: 31883 + - port: 18083 + name: dashboard + targetPort: 18083 + nodePort: 38085 + - port: 8083 + name: mqtt-websocket + targetPort: 8083 + nodePort: 38083 +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-emqxs-headless + namespace: zjyd + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + clusterIP: None + selector: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + ports: + - name: mqtt + port: 1883 + protocol: TCP + targetPort: 1883 + - name: mqttssl + port: 8883 + protocol: TCP + targetPort: 8883 + - name: mgmt + port: 8081 + protocol: TCP + targetPort: 8081 + - name: websocket + port: 8083 + protocol: TCP + targetPort: 8083 + - name: wss + port: 8084 + protocol: TCP + targetPort: 8084 + - name: dashboard + port: 18083 + protocol: TCP + targetPort: 18083 + - name: ekka + port: 4370 + protocol: TCP + targetPort: 
4370 diff --git a/agent-operator/deploy/z_bjtg/k8s-frontend.yaml b/agent-common/real_project/zjyd/k8s-frontend.yaml old mode 100755 new mode 100644 similarity index 79% rename from agent-operator/deploy/z_bjtg/k8s-frontend.yaml rename to agent-common/real_project/zjyd/k8s-frontend.yaml index 960b43d..c40f32d --- a/agent-operator/deploy/z_bjtg/k8s-frontend.yaml +++ b/agent-common/real_project/zjyd/k8s-frontend.yaml @@ -3,853 +3,37 @@ kind: ConfigMap apiVersion: v1 metadata: name: nginx-cm - namespace: bjtg + namespace: zjyd labels: cmii.type: frontend data: nginx.conf: | - user root; - worker_processes auto; + server { + listen 9528; + server_name localhost; + gzip on; - events { - worker_connections 1024; - } - http { - include mime.types; - default_type application/octet-stream; + location / { + root /home/cmii-platform/dist; + index index.html index.htm; + } - sendfile on; - - keepalive_timeout 600; - - server { - listen 9528; - server_name localhost; - gzip on; - - location / { - root /home/cmii-platform/dist; - index index.html index.htm; - } - - error_page 500 502 503 504 /50x.html; - location = /50x.html { - root html; - } + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root html; } } --- apiVersion: apps/v1 kind: Deployment -metadata: - name: cmii-uav-platform-jiangsuwenlv - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-jiangsuwenlv - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform-jiangsuwenlv - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-jiangsuwenlv - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform-jiangsuwenlv - image: 10.250.0.110:8033/cmii/cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - 
value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-platform-jiangsuwenlv - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-jiangsuwenlv - items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform-jiangsuwenlv - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-jiangsuwenlv - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: cmii-uav-platform-jiangsuwenlv - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-platform-logistics - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-logistics - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform-logistics - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-logistics - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform-logistics - image: 10.250.0.110:8033/cmii/cmii-uav-platform-logistics:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-platform-logistics - ports: - - name: platform-9528 - containerPort: 9528 - 
protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-logistics - items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform-logistics - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-logistics - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: cmii-uav-platform-logistics - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-platform-multiterminal - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-multiterminal - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform-multiterminal - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-multiterminal - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform-multiterminal - image: 10.250.0.110:8033/cmii/cmii-uav-platform-multiterminal:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-platform-multiterminal - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - 
- name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-multiterminal - items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform-multiterminal - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-multiterminal - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: cmii-uav-platform-multiterminal - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-platform-open - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-open - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform-open - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-open - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform-open - image: 10.250.0.110:8033/cmii/cmii-uav-platform-open:5.5.0-0419 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-platform-open - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: 
/home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-open - items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform-open - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-open - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: cmii-uav-platform-open - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-platform - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform - image: 10.250.0.110:8033/cmii/cmii-uav-platform:5.5.0-27712-0507 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-platform - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-platform - 
items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: cmii-uav-platform - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-platform-ai-brain - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-ai-brain - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform-ai-brain - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-ai-brain - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform-ai-brain - image: 10.250.0.110:8033/cmii/cmii-uav-platform-ai-brain:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-platform-ai-brain - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-ai-brain - items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform-ai-brain - namespace: bjtg - labels: - cmii.type: 
frontend - cmii.app: cmii-uav-platform-ai-brain - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: cmii-uav-platform-ai-brain - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-platform-mws - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-mws - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform-mws - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-mws - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform-mws - image: 10.250.0.110:8033/cmii/cmii-uav-platform-mws:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-platform-mws - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-mws - items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform-mws - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-mws - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: 
cmii-uav-platform-mws - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-platform-security - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-security - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform-security - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-security - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform-security - image: 10.250.0.110:8033/cmii/cmii-uav-platform-security:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-platform-security - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-security - items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform-security - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-security - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: cmii-uav-platform-security - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - 
name: cmii-uav-platform-splice - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-splice - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform-splice - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-splice - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform-splice - image: 10.250.0.110:8033/cmii/cmii-uav-platform-splice:5.5.0-042601 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-platform-splice - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-splice - items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform-splice - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-splice - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: cmii-uav-platform-splice - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: cmii-uav-platform-threedsimulation - namespace: bjtg + namespace: zjyd labels: cmii.type: frontend cmii.app: cmii-uav-platform-threedsimulation octopus.control: 
frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: replicas: 1 strategy: @@ -869,11 +53,11 @@ spec: - name: harborsecret containers: - name: cmii-uav-platform-threedsimulation - image: 10.250.0.110:8033/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392 + image: 192.168.10.3:8033/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392 imagePullPolicy: Always env: - name: K8S_NAMESPACE - value: bjtg + value: zjyd - name: APPLICATION_NAME value: cmii-uav-platform-threedsimulation ports: @@ -885,11 +69,11 @@ spec: cpu: "1" memory: 1Gi requests: - cpu: 500m - memory: 500Mi + cpu: 50m + memory: 50Mi volumeMounts: - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf + mountPath: /etc/nginx/conf.d/nginx.conf subPath: nginx.conf - name: tenant-prefix subPath: ingress-config.js @@ -912,12 +96,12 @@ apiVersion: v1 kind: Service metadata: name: cmii-uav-platform-threedsimulation - namespace: bjtg + namespace: zjyd labels: cmii.type: frontend cmii.app: cmii-uav-platform-threedsimulation octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 + app.kubernetes.io/version: 5.7.0 spec: type: ClusterIP selector: @@ -931,726 +115,14 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - name: cmii-uav-platform-securityh5 - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-securityh5 - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform-securityh5 - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-securityh5 - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform-securityh5 - image: 10.250.0.110:8033/cmii/cmii-uav-platform-securityh5:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - 
value: cmii-uav-platform-securityh5 - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-securityh5 - items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform-securityh5 - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-securityh5 - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: cmii-uav-platform-securityh5 - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-platform-share - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-share - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform-share - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-share - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform-share - image: 10.250.0.110:8033/cmii/cmii-uav-platform-share:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-platform-share - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - 
cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-share - items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform-share - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-share - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: cmii-uav-platform-share - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-platform-armypeople - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-armypeople - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform-armypeople - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-armypeople - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform-armypeople - image: 10.250.0.110:8033/cmii/cmii-uav-platform-armypeople:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-platform-armypeople - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - 
subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-armypeople - items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform-armypeople - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-armypeople - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: cmii-uav-platform-armypeople - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-platform-cms-portal - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-cms-portal - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform-cms-portal - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-cms-portal - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform-cms-portal - image: 10.250.0.110:8033/cmii/cmii-uav-platform-cms-portal:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-platform-cms-portal - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: 
nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-cmsportal - items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform-cms-portal - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-cms-portal - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: cmii-uav-platform-cms-portal - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-platform-media - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-media - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform-media - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-media - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform-media - image: 10.250.0.110:8033/cmii/cmii-uav-platform-media:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-platform-media - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-media - items: - - key: ingress-config.js - path: 
ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform-media - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-media - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: cmii-uav-platform-media - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-platform-oms - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-oms - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform-oms - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-oms - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform-oms - image: 10.250.0.110:8033/cmii/cmii-uav-platform-oms:5.5.0-042801 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-platform-oms - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-oms - items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform-oms - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-oms - octopus.control: 
frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: cmii-uav-platform-oms - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-platform-qinghaitourism - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-qinghaitourism - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-uav-platform-qinghaitourism - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-qinghaitourism - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-platform-qinghaitourism - image: 10.250.0.110:8033/cmii/cmii-uav-platform-qinghaitourism:4.1.0-21377-0508 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-platform-qinghaitourism - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-qinghaitourism - items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-platform-qinghaitourism - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-uav-platform-qinghaitourism - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - 
cmii.type: frontend - cmii.app: cmii-uav-platform-qinghaitourism - ports: - - name: web-svc-port - port: 9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-suav-platform-supervision - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-suav-platform-supervision - octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: cmii-suav-platform-supervision - template: - metadata: - labels: - cmii.type: frontend - cmii.app: cmii-suav-platform-supervision - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-suav-platform-supervision - image: 10.250.0.110:8033/cmii/cmii-suav-platform-supervision:5.5.0-042301 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-suav-platform-supervision - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-supervision - items: - - key: ingress-config.js - path: ingress-config.js ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-suav-platform-supervision - namespace: bjtg - labels: - cmii.type: frontend - cmii.app: cmii-suav-platform-supervision - octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: frontend - cmii.app: cmii-suav-platform-supervision - ports: - - name: web-svc-port - port: 
9528 - protocol: TCP - targetPort: 9528 ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: cmii-suav-platform-supervisionh5 - namespace: bjtg + namespace: zjyd labels: cmii.type: frontend cmii.app: cmii-suav-platform-supervisionh5 octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: replicas: 1 strategy: @@ -1670,11 +142,11 @@ spec: - name: harborsecret containers: - name: cmii-suav-platform-supervisionh5 - image: 10.250.0.110:8033/cmii/cmii-suav-platform-supervisionh5:5.5.0 + image: 192.168.10.3:8033/cmii/cmii-suav-platform-supervisionh5:5.7.0 imagePullPolicy: Always env: - name: K8S_NAMESPACE - value: bjtg + value: zjyd - name: APPLICATION_NAME value: cmii-suav-platform-supervisionh5 ports: @@ -1686,11 +158,11 @@ spec: cpu: "1" memory: 1Gi requests: - cpu: 500m - memory: 500Mi + cpu: 50m + memory: 50Mi volumeMounts: - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf + mountPath: /etc/nginx/conf.d/nginx.conf subPath: nginx.conf - name: tenant-prefix subPath: ingress-config.js @@ -1713,12 +185,12 @@ apiVersion: v1 kind: Service metadata: name: cmii-suav-platform-supervisionh5 - namespace: bjtg + namespace: zjyd labels: cmii.type: frontend cmii.app: cmii-suav-platform-supervisionh5 octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 + app.kubernetes.io/version: 5.7.0 spec: type: ClusterIP selector: @@ -1733,13 +205,13 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - name: cmii-uav-platform-base - namespace: bjtg + name: cmii-uav-platform-oms + namespace: zjyd labels: cmii.type: frontend - cmii.app: cmii-uav-platform-base + cmii.app: cmii-uav-platform-oms octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: replicas: 1 strategy: @@ -1748,24 +220,24 @@ spec: selector: matchLabels: cmii.type: frontend - cmii.app: cmii-uav-platform-base + cmii.app: cmii-uav-platform-oms template: metadata: 
labels: cmii.type: frontend - cmii.app: cmii-uav-platform-base + cmii.app: cmii-uav-platform-oms spec: imagePullSecrets: - name: harborsecret containers: - - name: cmii-uav-platform-base - image: 10.250.0.110:8033/cmii/cmii-uav-platform-base:5.4.0 + - name: cmii-uav-platform-oms + image: 192.168.10.3:8033/cmii/cmii-uav-platform-oms:5.7.0 imagePullPolicy: Always env: - name: K8S_NAMESPACE - value: bjtg + value: zjyd - name: APPLICATION_NAME - value: cmii-uav-platform-base + value: cmii-uav-platform-oms ports: - name: platform-9528 containerPort: 9528 @@ -1775,11 +247,11 @@ spec: cpu: "1" memory: 1Gi requests: - cpu: 500m - memory: 500Mi + cpu: 50m + memory: 50Mi volumeMounts: - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf + mountPath: /etc/nginx/conf.d/nginx.conf subPath: nginx.conf - name: tenant-prefix subPath: ingress-config.js @@ -1793,7 +265,7 @@ spec: path: nginx.conf - name: tenant-prefix configMap: - name: tenant-prefix-base + name: tenant-prefix-oms items: - key: ingress-config.js path: ingress-config.js @@ -1801,18 +273,107 @@ spec: apiVersion: v1 kind: Service metadata: - name: cmii-uav-platform-base - namespace: bjtg + name: cmii-uav-platform-oms + namespace: zjyd labels: cmii.type: frontend - cmii.app: cmii-uav-platform-base + cmii.app: cmii-uav-platform-oms octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 + app.kubernetes.io/version: 5.7.0 spec: type: ClusterIP selector: cmii.type: frontend - cmii.app: cmii-uav-platform-base + cmii.app: cmii-uav-platform-oms + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-media + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: 
cmii-uav-platform-media + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-media + image: 192.168.10.3:8033/cmii/cmii-uav-platform-media:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-media + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-media + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-media + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-media ports: - name: web-svc-port port: 9528 @@ -1823,12 +384,12 @@ apiVersion: apps/v1 kind: Deployment metadata: name: cmii-uav-platform-detection - namespace: bjtg + namespace: zjyd labels: cmii.type: frontend cmii.app: cmii-uav-platform-detection octopus.control: frontend-app-wdd - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: replicas: 1 strategy: @@ -1848,11 +409,11 @@ spec: - name: harborsecret containers: - name: cmii-uav-platform-detection - image: 10.250.0.110:8033/cmii/cmii-uav-platform-detection:5.5.0 + image: 192.168.10.3:8033/cmii/cmii-uav-platform-detection:5.6.0 imagePullPolicy: Always env: - name: 
K8S_NAMESPACE - value: bjtg + value: zjyd - name: APPLICATION_NAME value: cmii-uav-platform-detection ports: @@ -1864,11 +425,11 @@ spec: cpu: "1" memory: 1Gi requests: - cpu: 500m - memory: 500Mi + cpu: 50m + memory: 50Mi volumeMounts: - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf + mountPath: /etc/nginx/conf.d/nginx.conf subPath: nginx.conf - name: tenant-prefix subPath: ingress-config.js @@ -1891,12 +452,12 @@ apiVersion: v1 kind: Service metadata: name: cmii-uav-platform-detection - namespace: bjtg + namespace: zjyd labels: cmii.type: frontend cmii.app: cmii-uav-platform-detection octopus.control: frontend-app-wdd - app.kubernetes.io/version: 5.5.0 + app.kubernetes.io/version: 5.7.0 spec: type: ClusterIP selector: @@ -1907,3 +468,1516 @@ spec: port: 9528 protocol: TCP targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-ai-brain + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-ai-brain + image: 192.168.10.3:8033/cmii/cmii-uav-platform-ai-brain:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-ai-brain + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + 
volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-ai-brain + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-ai-brain + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-uas + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-uas + image: 192.168.10.3:8033/cmii/cmii-uav-platform-uas:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-uas + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-uas + items: + - key: 
ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-uas + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-share + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-share + image: 192.168.10.3:8033/cmii/cmii-uav-platform-share:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-share + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-share + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-share + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: 
cmii-uav-platform-share + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-mws + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-mws + image: 192.168.10.3:8033/cmii/cmii-uav-platform-mws:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-mws + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-mws + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-mws + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + ports: + - name: 
web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-cms-portal + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-cms-portal + image: 192.168.10.3:8033/cmii/cmii-uav-platform-cms-portal:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-cms-portal + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-cmsportal + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-cms-portal + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: 
cmii-uav-platform-uasms + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-uasms + image: 192.168.10.3:8033/cmii/cmii-uav-platform-uasms:5.7.0-071815 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-uasms + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-uasms + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-uasms + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-logistics + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 
5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-logistics + image: 192.168.10.3:8033/cmii/cmii-uav-platform-logistics:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-logistics + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-logistics + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-logistics + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-emergency-rescue + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: 
cmii-uav-platform-emergency-rescue + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-emergency-rescue + image: 192.168.10.3:8033/cmii/cmii-uav-platform-emergency-rescue:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-emergency-rescue + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-emergency + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-emergency-rescue + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-dispatchh5 + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + template: + metadata: + labels: + cmii.type: frontend + 
cmii.app: cmii-uav-platform-dispatchh5 + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-dispatchh5 + image: 192.168.10.3:8033/cmii/cmii-uav-platform-dispatchh5:5.6.0-0708 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-dispatchh5 + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-dispatchh5 + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-dispatchh5 + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-splice + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-splice + image: 
192.168.10.3:8033/cmii/cmii-uav-platform-splice:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-splice + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-splice + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-splice + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-security + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-security + image: 192.168.10.3:8033/cmii/cmii-uav-platform-security:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: 
cmii-uav-platform-security + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-security + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-security + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-securityh5 + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-securityh5 + image: 192.168.10.3:8033/cmii/cmii-uav-platform-securityh5:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-securityh5 + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + 
requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-securityh5 + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-securityh5 + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-multiterminal + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-multiterminal + image: 192.168.10.3:8033/cmii/cmii-uav-platform-multiterminal:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-multiterminal + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + 
subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-multiterminal + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-multiterminal + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-armypeople + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-armypeople + image: 192.168.10.3:8033/cmii/cmii-uav-platform-armypeople:5.7.0-29668-071901 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-armypeople + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: 
/home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-armypeople + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-armypeople + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform + image: 192.168.10.3:8033/cmii/cmii-uav-platform:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-pangu + 
items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-suav-platform-supervision + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-suav-platform-supervision + image: 192.168.10.3:8033/cmii/cmii-suav-platform-supervision:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-suav-platform-supervision + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-supervision + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-suav-platform-supervision + namespace: 
zjyd + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-open + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-open + image: 192.168.10.3:8033/cmii/cmii-uav-platform-open:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-open + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-open + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-open + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: 
frontend + cmii.app: cmii-uav-platform-open + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 diff --git a/agent-common/real_project/zjyd/k8s-ingress.yaml b/agent-common/real_project/zjyd/k8s-ingress.yaml new file mode 100644 index 0000000..cc9145d --- /dev/null +++ b/agent-common/real_project/zjyd/k8s-ingress.yaml @@ -0,0 +1,604 @@ +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: frontend-applications-ingress + namespace: zjyd + labels: + type: frontend + octopus.control: all-ingress-config-wdd + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite ^(/supervision)$ $1/ redirect; + rewrite ^(/supervisionh5)$ $1/ redirect; + rewrite ^(/pangu)$ $1/ redirect; + rewrite ^(/ai-brain)$ $1/ redirect; + rewrite ^(/armypeople)$ $1/ redirect; + rewrite ^(/base)$ $1/ redirect; + rewrite ^(/cmsportal)$ $1/ redirect; + rewrite ^(/detection)$ $1/ redirect; + rewrite ^(/dispatchh5)$ $1/ redirect; + rewrite ^(/emergency)$ $1/ redirect; + rewrite ^(/hljtt)$ $1/ redirect; + rewrite ^(/jiangsuwenlv)$ $1/ redirect; + rewrite ^(/logistics)$ $1/ redirect; + rewrite ^(/media)$ $1/ redirect; + rewrite ^(/multiterminal)$ $1/ redirect; + rewrite ^(/mws)$ $1/ redirect; + rewrite ^(/oms)$ $1/ redirect; + rewrite ^(/open)$ $1/ redirect; + rewrite ^(/qingdao)$ $1/ redirect; + rewrite ^(/qinghaitourism)$ $1/ redirect; + rewrite ^(/security)$ $1/ redirect; + rewrite ^(/securityh5)$ $1/ redirect; + rewrite ^(/seniclive)$ $1/ redirect; + rewrite ^(/share)$ $1/ redirect; + rewrite ^(/splice)$ $1/ redirect; + rewrite ^(/threedsimulation)$ $1/ redirect; + rewrite ^(/traffic)$ $1/ redirect; + rewrite ^(/uas)$ $1/ redirect; + rewrite ^(/uasms)$ $1/ redirect; + rewrite ^(/visualization)$ $1/ redirect; 
+spec: + rules: + - host: fake-domain.zjyd.io + http: + paths: + - path: /?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /supervision/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-platform-supervision + servicePort: 9528 + - path: /supervisionh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-platform-supervisionh5 + servicePort: 9528 + - path: /pangu/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /ai-brain/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-ai-brain + servicePort: 9528 + - path: /armypeople/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-armypeople + servicePort: 9528 + - path: /base/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-base + servicePort: 9528 + - path: /cmsportal/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-cms-portal + servicePort: 9528 + - path: /detection/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-detection + servicePort: 9528 + - path: /dispatchh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-dispatchh5 + servicePort: 9528 + - path: /emergency/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-emergency-rescue + servicePort: 9528 + - path: /hljtt/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-hljtt + servicePort: 9528 + - path: /jiangsuwenlv/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-jiangsuwenlv + servicePort: 9528 + - path: /logistics/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-logistics + servicePort: 9528 + - path: /media/?(.*) + pathType: 
ImplementationSpecific + backend: + serviceName: cmii-uav-platform-media + servicePort: 9528 + - path: /multiterminal/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-multiterminal + servicePort: 9528 + - path: /mws/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-mws + servicePort: 9528 + - path: /oms/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-oms + servicePort: 9528 + - path: /open/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-open + servicePort: 9528 + - path: /qingdao/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-qingdao + servicePort: 9528 + - path: /qinghaitourism/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-qinghaitourism + servicePort: 9528 + - path: /security/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-security + servicePort: 9528 + - path: /securityh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-securityh5 + servicePort: 9528 + - path: /seniclive/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-seniclive + servicePort: 9528 + - path: /share/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-share + servicePort: 9528 + - path: /splice/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-splice + servicePort: 9528 + - path: /threedsimulation/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-threedsimulation + servicePort: 9528 + - path: /traffic/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-traffic + servicePort: 9528 + - path: /uas/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-uas + servicePort: 9528 + - path: /uasms/?(.*) + pathType: 
ImplementationSpecific + backend: + serviceName: cmii-uav-platform-uasms + servicePort: 9528 + - path: /visualization/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-visualization + servicePort: 9528 +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: backend-applications-ingress + namespace: zjyd + labels: + type: backend + octopus.control: all-ingress-config-wdd + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" +spec: + rules: + - host: cmii-admin-data.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-data + servicePort: 8080 + - host: cmii-admin-gateway.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-gateway + servicePort: 8080 + - host: cmii-admin-user.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-user + servicePort: 8080 + - host: cmii-app-release.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-app-release + servicePort: 8080 + - host: cmii-open-gateway.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-open-gateway + servicePort: 8080 + - host: cmii-suav-supervision.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-supervision + servicePort: 8080 + - host: cmii-uas-gateway.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uas-gateway + servicePort: 8080 + - host: cmii-uas-lifecycle.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uas-lifecycle + 
servicePort: 8080 + - host: cmii-uav-airspace.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-airspace + servicePort: 8080 + - host: cmii-uav-alarm.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-alarm + servicePort: 8080 + - host: cmii-uav-autowaypoint.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-autowaypoint + servicePort: 8080 + - host: cmii-uav-brain.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-brain + servicePort: 8080 + - host: cmii-uav-bridge.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-bridge + servicePort: 8080 + - host: cmii-uav-cloud-live.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-cloud-live + servicePort: 8080 + - host: cmii-uav-clusters.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-clusters + servicePort: 8080 + - host: cmii-uav-cms.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-cms + servicePort: 8080 + - host: cmii-uav-data-post-process.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-data-post-process + servicePort: 8080 + - host: cmii-uav-depotautoreturn.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-depotautoreturn + servicePort: 8080 + - host: cmii-uav-developer.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-developer + servicePort: 8080 + - host: cmii-uav-device.uavcloud-zjyd.io + http: + 
paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-device + servicePort: 8080 + - host: cmii-uav-emergency.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-emergency + servicePort: 8080 + - host: cmii-uav-gateway.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gateway + servicePort: 8080 + - host: cmii-uav-gis-server.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gis-server + servicePort: 8080 + - host: cmii-uav-grid-datasource.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-grid-datasource + servicePort: 8080 + - host: cmii-uav-grid-engine.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-grid-engine + servicePort: 8080 + - host: cmii-uav-grid-manage.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-grid-manage + servicePort: 8080 + - host: cmii-uav-industrial-portfolio.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-industrial-portfolio + servicePort: 8080 + - host: cmii-uav-integration.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-integration + servicePort: 8080 + - host: cmii-uav-kpi-monitor.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-kpi-monitor + servicePort: 8080 + - host: cmii-uav-logger.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-logger + servicePort: 8080 + - host: cmii-uav-material-warehouse.uavcloud-zjyd.io + http: + paths: + - path: / + 
pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-material-warehouse + servicePort: 8080 + - host: cmii-uav-mission.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-mission + servicePort: 8080 + - host: cmii-uav-mqtthandler.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-mqtthandler + servicePort: 8080 + - host: cmii-uav-multilink.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-multilink + servicePort: 8080 + - host: cmii-uav-notice.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-notice + servicePort: 8080 + - host: cmii-uav-oauth.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-oauth + servicePort: 8080 + - host: cmii-uav-process.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-process + servicePort: 8080 + - host: cmii-uav-sense-adapter.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-sense-adapter + servicePort: 8080 + - host: cmii-uav-surveillance.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-surveillance + servicePort: 8080 + - host: cmii-uav-threedsimulation.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-threedsimulation + servicePort: 8080 + - host: cmii-uav-tower.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-tower + servicePort: 8080 + - host: cmii-uav-user.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: 
cmii-uav-user + servicePort: 8080 + - host: cmii-uav-waypoint.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-waypoint + servicePort: 8080 +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: all-gateways-ingress + namespace: zjyd + labels: + type: api-gateway + octopus.control: all-ingress-config-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; +spec: + rules: + - host: fake-domain.zjyd.io + http: + paths: + - path: /oms/api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-gateway + servicePort: 8080 + - path: /open/api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-open-gateway + servicePort: 8080 + - path: /api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gateway + servicePort: 8080 diff --git a/agent-common/real_project/zjyd/k8s-mongo.yaml b/agent-common/real_project/zjyd/k8s-mongo.yaml new file mode 100644 index 0000000..95e234b --- /dev/null +++ b/agent-common/real_project/zjyd/k8s-mongo.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mongo + namespace: zjyd + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 +spec: + type: NodePort + selector: + cmii.app: helm-mongo + cmii.type: middleware + ports: + - port: 27017 + name: server-27017 + targetPort: 27017 + nodePort: 37017 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-mongo + namespace: zjyd + labels: + cmii.app: helm-mongo + cmii.type: 
middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 +spec: + serviceName: helm-mongo + replicas: 1 + selector: + matchLabels: + cmii.app: helm-mongo + cmii.type: middleware + template: + metadata: + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 + annotations: + pod.alpha.kubernetes.io/initialized: "true" + spec: + imagePullSecrets: + - name: harborsecret + affinity: { } + containers: + - name: helm-mongo + image: 192.168.10.3:8033/cmii/mongo:5.0 + resources: { } + ports: + - containerPort: 27017 + name: mongo27017 + protocol: TCP + env: + - name: MONGO_INITDB_ROOT_USERNAME + value: cmlc + - name: MONGO_INITDB_ROOT_PASSWORD + value: REdPza8#oVlt + volumeMounts: + - name: mongo-data + mountPath: /data/db + readOnly: false + subPath: default/helm-mongo/data/db + volumes: + - name: mongo-data + persistentVolumeClaim: + claimName: helm-mongo +--- diff --git a/agent-common/real_project/zjyd/k8s-mysql.yaml b/agent-common/real_project/zjyd/k8s-mysql.yaml new file mode 100644 index 0000000..b9f9310 --- /dev/null +++ b/agent-common/real_project/zjyd/k8s-mysql.yaml @@ -0,0 +1,423 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-mysql + namespace: zjyd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + annotations: { } +secrets: + - name: helm-mysql +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-mysql + namespace: zjyd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus +type: Opaque +data: + mysql-root-password: "UXpmWFFoZDNiUQ==" + mysql-password: "S0F0cm5PckFKNw==" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-mysql + namespace: zjyd + 
labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: primary +data: + my.cnf: |- + + [mysqld] + port=3306 + basedir=/opt/bitnami/mysql + datadir=/bitnami/mysql/data + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + socket=/opt/bitnami/mysql/tmp/mysql.sock + log-error=/bitnami/mysql/data/error.log + general_log_file = /bitnami/mysql/data/general.log + slow_query_log_file = /bitnami/mysql/data/slow.log + innodb_data_file_path = ibdata1:512M:autoextend + innodb_buffer_pool_size = 512M + innodb_buffer_pool_instances = 2 + innodb_log_file_size = 512M + innodb_log_files_in_group = 4 + innodb_log_files_in_group = 4 + log-bin = /bitnami/mysql/data/mysql-bin + max_binlog_size=1G + transaction_isolation = REPEATABLE-READ + default_storage_engine = innodb + character-set-server = utf8mb4 + collation-server=utf8mb4_bin + binlog_format = ROW + binlog_rows_query_log_events=on + binlog_cache_size=4M + binlog_expire_logs_seconds = 1296000 + max_binlog_cache_size=2G + gtid_mode = on + enforce_gtid_consistency = 1 + sync_binlog = 1 + innodb_flush_log_at_trx_commit = 1 + innodb_flush_method = O_DIRECT + log_slave_updates=1 + relay_log_recovery = 1 + relay-log-purge = 1 + default_time_zone = '+08:00' + lower_case_table_names=1 + log_bin_trust_function_creators=1 + group_concat_max_len=67108864 + innodb_io_capacity = 4000 + innodb_io_capacity_max = 8000 + innodb_flush_sync = 0 + innodb_flush_neighbors = 0 + innodb_write_io_threads = 8 + innodb_read_io_threads = 8 + innodb_purge_threads = 4 + innodb_page_cleaners = 4 + innodb_open_files = 65535 + innodb_max_dirty_pages_pct = 50 + innodb_lru_scan_depth = 4000 + innodb_checksum_algorithm = crc32 + innodb_lock_wait_timeout = 10 + innodb_rollback_on_timeout = 1 + innodb_print_all_deadlocks = 1 + innodb_file_per_table = 1 + innodb_online_alter_log_max_size = 4G + innodb_stats_on_metadata = 0 + innodb_thread_concurrency 
= 0 + innodb_sync_spin_loops = 100 + innodb_spin_wait_delay = 30 + lock_wait_timeout = 3600 + slow_query_log = 1 + long_query_time = 10 + log_queries_not_using_indexes =1 + log_throttle_queries_not_using_indexes = 60 + min_examined_row_limit = 100 + log_slow_admin_statements = 1 + log_slow_slave_statements = 1 + default_authentication_plugin=mysql_native_password + skip-name-resolve=1 + explicit_defaults_for_timestamp=1 + plugin_dir=/opt/bitnami/mysql/plugin + max_allowed_packet=128M + max_connections = 2000 + max_connect_errors = 1000000 + table_definition_cache=2000 + table_open_cache_instances=64 + tablespace_definition_cache=1024 + thread_cache_size=256 + interactive_timeout = 600 + wait_timeout = 600 + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=32M + bind-address=0.0.0.0 + performance_schema = 1 + performance_schema_instrument = '%memory%=on' + performance_schema_instrument = '%lock%=on' + innodb_monitor_enable=ALL + + [mysql] + no-auto-rehash + + [mysqldump] + quick + max_allowed_packet = 32M + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mysql/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-mysql-init-scripts + namespace: zjyd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: primary +data: + create_users_grants_core.sql: |- + create + user zyly@'%' identified by 'Cmii@451315'; + grant select on *.* to zyly@'%'; + create + user zyly_qc@'%' identified by 'Uh)E_owCyb16'; + grant all + on *.* to zyly_qc@'%'; + create + user k8s_admin@'%' identified by 'fP#UaH6qQ3)8'; + grant all + on *.* to k8s_admin@'%'; + create + user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH'; + grant all + on *.* to audit_dba@'%'; + 
create + user db_backup@'%' identified by 'RU5Pu(4FGdT9'; + GRANT + SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT + on *.* to db_backup@'%'; + create + user monitor@'%' identified by 'PL3#nGtrWbf-'; + grant REPLICATION + CLIENT on *.* to monitor@'%'; + flush + privileges; +--- +kind: Service +apiVersion: v1 +metadata: + name: cmii-mysql + namespace: zjyd + labels: + app.kubernetes.io/component: primary + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: zjyd + cmii.app: mysql + cmii.type: middleware + octopus.control: mysql-db-wdd +spec: + ports: + - name: mysql + protocol: TCP + port: 13306 + targetPort: mysql + selector: + app.kubernetes.io/component: primary + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: zjyd + cmii.app: mysql + cmii.type: middleware + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mysql-headless + namespace: zjyd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + annotations: { } +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mysql + port: 3306 + targetPort: mysql + selector: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: zjyd + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mysql + namespace: zjyd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + annotations: { } +spec: + type: NodePort + ports: + - name: mysql + port: 3306 + protocol: TCP + targetPort: mysql + nodePort: 33306 + selector: + app.kubernetes.io/name: 
mysql-db + app.kubernetes.io/release: zjyd + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-mysql + namespace: zjyd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: zjyd + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + serviceName: helm-mysql + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + spec: + imagePullSecrets: + - name: harborsecret + serviceAccountName: helm-mysql + affinity: { } + nodeSelector: + mysql-deploy: "true" + securityContext: + fsGroup: 1001 + initContainers: + - name: change-volume-permissions + image: 192.168.10.3:8033/cmii/bitnami-shell:11-debian-11-r136 + imagePullPolicy: "Always" + command: + - /bin/bash + - -ec + - | + chown -R 1001:1001 /bitnami/mysql + securityContext: + runAsUser: 0 + volumeMounts: + - name: mysql-data + mountPath: /bitnami/mysql + containers: + - name: mysql + image: 192.168.10.3:8033/cmii/mysql:8.1.0-debian-11-r42 + imagePullPolicy: "IfNotPresent" + securityContext: + runAsUser: 1001 + env: + - name: BITNAMI_DEBUG + value: "true" + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: helm-mysql + key: mysql-root-password + - name: MYSQL_DATABASE + value: "cmii" + ports: + - name: mysql + containerPort: 3306 + livenessProbe: + 
failureThreshold: 5 + initialDelaySeconds: 120 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + readinessProbe: + failureThreshold: 5 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + resources: + limits: { } + requests: { } + volumeMounts: + - name: mysql-data + mountPath: /bitnami/mysql + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + - name: config + mountPath: /opt/bitnami/mysql/conf/my.cnf + subPath: my.cnf + volumes: + - name: config + configMap: + name: helm-mysql + - name: custom-init-scripts + configMap: + name: helm-mysql-init-scripts + - name: mysql-data + hostPath: + path: /var/lib/docker/mysql-pv/zjyd/ diff --git a/agent-common/real_project/zjyd/k8s-nacos.yaml b/agent-common/real_project/zjyd/k8s-nacos.yaml new file mode 100644 index 0000000..1f70d80 --- /dev/null +++ b/agent-common/real_project/zjyd/k8s-nacos.yaml @@ -0,0 +1,130 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-nacos-cm + namespace: zjyd + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + 
app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 5.7.0 +data: + mysql.db.name: "cmii_nacos_config" + mysql.db.host: "helm-mysql" + mysql.port: "3306" + mysql.user: "k8s_admin" + mysql.password: "fP#UaH6qQ3)8" +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-nacos + namespace: zjyd + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 5.7.0 +spec: + type: NodePort + selector: + cmii.app: helm-nacos + cmii.type: middleware + ports: + - port: 8848 + name: server + targetPort: 8848 + nodePort: 38848 + - port: 9848 + name: server12 + targetPort: 9848 + - port: 9849 + name: server23 + targetPort: 9849 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-nacos + namespace: zjyd + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 5.7.0 +spec: + serviceName: helm-nacos + replicas: 1 + selector: + matchLabels: + cmii.app: helm-nacos + cmii.type: middleware + template: + metadata: + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/version: 5.7.0 + annotations: + pod.alpha.kubernetes.io/initialized: "true" + spec: + imagePullSecrets: + - name: harborsecret + affinity: { } + containers: + - name: nacos-server + image: 192.168.10.3:8033/cmii/nacos-server:v2.1.2 + ports: + - containerPort: 8848 + name: dashboard + - containerPort: 9848 + name: tcp-9848 + - containerPort: 9849 + name: tcp-9849 + env: + - name: NACOS_AUTH_ENABLE + value: "false" + - name: NACOS_REPLICAS + value: "1" + - name: MYSQL_SERVICE_DB_NAME + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.db.name + - name: MYSQL_SERVICE_PORT + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.port + - name: MYSQL_SERVICE_USER + valueFrom: + configMapKeyRef: + 
name: helm-nacos-cm + key: mysql.user + - name: MYSQL_SERVICE_PASSWORD + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.password + - name: MYSQL_SERVICE_HOST + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.db.host + - name: NACOS_SERVER_PORT + value: "8848" + - name: NACOS_APPLICATION_PORT + value: "8848" + - name: PREFER_HOST_MODE + value: "hostname" + - name: MODE + value: standalone + - name: SPRING_DATASOURCE_PLATFORM + value: mysql +--- diff --git a/agent-common/real_project/zjyd/k8s-nfs-test.yaml b/agent-common/real_project/zjyd/k8s-nfs-test.yaml new file mode 100644 index 0000000..f49291a --- /dev/null +++ b/agent-common/real_project/zjyd/k8s-nfs-test.yaml @@ -0,0 +1,38 @@ +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: test-claim + annotations: + volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" #与nfs-StorageClass.yaml metadata.name保持一致 +spec: + accessModes: + - ReadWriteOnce + storageClassName: nfs-prod-distribute + resources: + requests: + storage: 1Mi +--- +kind: Pod +apiVersion: v1 +metadata: + name: test-pod +spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: test-pod + image: 192.168.10.3:8033/cmii/busybox:latest + command: + - "/bin/sh" + args: + - "-c" + - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出 + volumeMounts: + - name: nfs-pvc + mountPath: "/mnt" + restartPolicy: "Never" + volumes: + - name: nfs-pvc + persistentVolumeClaim: + claimName: test-claim #与PVC名称保持一致 diff --git a/agent-common/real_project/zjyd/k8s-nfs.yaml b/agent-common/real_project/zjyd/k8s-nfs.yaml new file mode 100644 index 0000000..726fb42 --- /dev/null +++ b/agent-common/real_project/zjyd/k8s-nfs.yaml @@ -0,0 +1,114 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system #根据实际环境设定namespace,下面类同 +--- +kind: ClusterRole +apiVersion: 
rbac.authorization.k8s.io/v1 +metadata: + name: nfs-client-provisioner-runner +rules: + - apiGroups: [ "" ] + resources: [ "persistentvolumes" ] + verbs: [ "get", "list", "watch", "create", "delete" ] + - apiGroups: [ "" ] + resources: [ "persistentvolumeclaims" ] + verbs: [ "get", "list", "watch", "update" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "storageclasses" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "create", "update", "patch" ] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: run-nfs-client-provisioner +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system +roleRef: + kind: ClusterRole + # name: nfs-client-provisioner-runner + name: cluster-admin + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system +rules: + - apiGroups: [ "" ] + resources: [ "endpoints" ] + verbs: [ "get", "list", "watch", "create", "update", "patch" ] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system +roleRef: + kind: Role + name: leader-locking-nfs-client-provisioner + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: nfs-prod-distribute +provisioner: cmlc-nfs-storage #这里的名称要和provisioner配置文件中的环境变量PROVISIONER_NAME保持一致 +parameters: + archiveOnDelete: "false" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nfs-client-provisioner + labels: + app: nfs-client-provisioner + # replace with namespace where provisioner is
deployed + namespace: kube-system #与RBAC文件中的namespace保持一致 +spec: + replicas: 1 + selector: + matchLabels: + app: nfs-client-provisioner + strategy: + type: Recreate + template: + metadata: + labels: + app: nfs-client-provisioner + spec: + imagePullSecrets: + - name: harborsecret + serviceAccountName: nfs-client-provisioner + containers: + - name: nfs-client-provisioner + image: 192.168.10.3:8033/cmii/nfs-subdir-external-provisioner:v4.0.2 + volumeMounts: + - name: nfs-client-root + mountPath: /persistentvolumes + env: + - name: PROVISIONER_NAME + value: cmlc-nfs-storage + - name: NFS_SERVER + value: 192.168.10.3 + - name: NFS_PATH + value: /var/lib/docker/nfs_data + volumes: + - name: nfs-client-root + nfs: + server: 192.168.10.3 + path: /var/lib/docker/nfs_data diff --git a/agent-common/real_project/zjyd/k8s-pvc.yaml b/agent-common/real_project/zjyd/k8s-pvc.yaml new file mode 100644 index 0000000..8082c4e --- /dev/null +++ b/agent-common/real_project/zjyd/k8s-pvc.yaml @@ -0,0 +1,76 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: nfs-backend-log-pvc + namespace: zjyd + labels: + cmii.type: middleware-base + cmii.app: nfs-backend-log-pvc + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.7.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 100Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-emqxs + namespace: zjyd + labels: + cmii.type: middleware-base + cmii.app: helm-emqxs + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.7.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 20Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-mongo + namespace: zjyd + labels: + cmii.type: middleware-base + cmii.app: helm-mongo + helm.sh/chart: 
all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.7.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 30Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-rabbitmq + namespace: zjyd + labels: + cmii.type: middleware-base + cmii.app: helm-rabbitmq + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.7.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 20Gi diff --git a/agent-common/real_project/zjyd/k8s-rabbitmq.yaml b/agent-common/real_project/zjyd/k8s-rabbitmq.yaml new file mode 100644 index 0000000..bc68704 --- /dev/null +++ b/agent-common/real_project/zjyd/k8s-rabbitmq.yaml @@ -0,0 +1,328 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-rabbitmq + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +automountServiceAccountToken: true +secrets: + - name: helm-rabbitmq +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-rabbitmq + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +type: Opaque +data: + rabbitmq-password: "blljUk45MXIuX2hq" + rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-rabbitmq-config + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +data: + rabbitmq.conf: |- + ## Username and password + ## + default_user = admin + default_pass = nYcRN91r._hj + ## Clustering + ## + cluster_formation.peer_discovery_backend = 
rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.cluster.local + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + # queue master locator + queue_master_locator = min-masters + # enable guest user + loopback_users.guest = false + #default_vhost = default-vhost + #disk_free_limit.absolute = 50MB + #load_definitions = /app/load_definition.json +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-rabbitmq-endpoint-reader + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +rules: + - apiGroups: [ "" ] + resources: [ "endpoints" ] + verbs: [ "get" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "create" ] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-rabbitmq-endpoint-reader + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +subjects: + - kind: ServiceAccount + name: helm-rabbitmq +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: helm-rabbitmq-endpoint-reader +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-rabbitmq-headless + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +spec: + clusterIP: None + ports: + - name: epmd + port: 4369 + targetPort: epmd + - name: amqp + port: 5672 + targetPort: amqp + - name: dist + port: 25672 + targetPort: dist + - name: dashboard + port: 15672 + targetPort: dashboard + selector: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: zjyd + publishNotReadyAddresses: true +--- +apiVersion: v1 +kind: Service +metadata: 
+ name: helm-rabbitmq + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +spec: + type: NodePort + ports: + - name: amqp + port: 5672 + targetPort: amqp + nodePort: 35672 + - name: dashboard + port: 15672 + targetPort: dashboard + nodePort: 36675 + selector: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: zjyd +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-rabbitmq + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +spec: + serviceName: helm-rabbitmq-headless + podManagementPolicy: OrderedReady + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: zjyd + template: + metadata: + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq + annotations: + checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 + checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f + spec: + imagePullSecrets: + - name: harborsecret + serviceAccountName: helm-rabbitmq + affinity: { } + securityContext: + fsGroup: 5001 + runAsUser: 5001 + terminationGracePeriodSeconds: 120 + initContainers: + - name: volume-permissions + image: 192.168.10.3:8033/cmii/bitnami-shell:11-debian-11-r136 + imagePullPolicy: "Always" + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "/bitnami/rabbitmq/mnesia" + chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" + securityContext: + runAsUser: 0 + resources: + limits: { } + requests: { } + volumeMounts: + - name: data + mountPath: /bitnami/rabbitmq/mnesia + containers: + - name: rabbitmq + image: 
192.168.10.3:8033/cmii/rabbitmq:3.9.12-debian-10-r3 + imagePullPolicy: "Always" + env: + - name: BITNAMI_DEBUG + value: "false" + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: "helm-rabbitmq-headless" + - name: K8S_ADDRESS_TYPE + value: hostname + - name: RABBITMQ_FORCE_BOOT + value: "no" + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" + - name: RABBITMQ_MNESIA_DIR + value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: "no" + - name: RABBITMQ_LOGS + value: "-" + - name: RABBITMQ_ULIMIT_NOFILES + value: "65536" + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: helm-rabbitmq + key: rabbitmq-erlang-cookie + - name: RABBITMQ_LOAD_DEFINITIONS + value: "no" + - name: RABBITMQ_SECURE_PASSWORD + value: "yes" + - name: RABBITMQ_USERNAME + value: "admin" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: helm-rabbitmq + key: rabbitmq-password + - name: RABBITMQ_PLUGINS + value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" + ports: + - name: amqp + containerPort: 5672 + - name: dist + containerPort: 25672 + - name: dashboard + containerPort: 15672 + - name: epmd + containerPort: 4369 + livenessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q ping + initialDelaySeconds: 120 + periodSeconds: 30 + timeoutSeconds: 20 + successThreshold: 1 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q check_running && 
rabbitmq-diagnostics -q check_local_alarms + initialDelaySeconds: 10 + periodSeconds: 30 + timeoutSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" + else + rabbitmqctl stop_app + fi + resources: + limits: { } + requests: { } + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: data + mountPath: /bitnami/rabbitmq/mnesia + volumes: + - name: configuration + configMap: + name: helm-rabbitmq-config + items: + - key: rabbitmq.conf + path: rabbitmq.conf + - name: data + persistentVolumeClaim: + claimName: helm-rabbitmq diff --git a/agent-common/real_project/zjyd/k8s-redis.yaml b/agent-common/real_project/zjyd/k8s-redis.yaml new file mode 100644 index 0000000..a63b949 --- /dev/null +++ b/agent-common/real_project/zjyd/k8s-redis.yaml @@ -0,0 +1,585 @@ +--- +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: helm-redis + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-redis + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus +type: Opaque +data: + redis-password: "TWNhY2hlQDQ1MjI=" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-configuration + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus +data: + redis.conf: |- + # User-supplied common configuration: + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, 
AOF persistence already enabled. + save "" + # End of common configuration + master.conf: |- + dir /data + # User-supplied master configuration: + rename-command FLUSHDB "" + rename-command FLUSHALL "" + # End of master configuration + replica.conf: |- + dir /data + slave-read-only yes + # User-supplied replica configuration: + rename-command FLUSHDB "" + rename-command FLUSHALL "" + # End of replica configuration +--- +# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-health + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus +data: + ping_readiness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + 
ping_liveness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status +--- +# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-scripts + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus +data: + start-master.sh: | + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + exec redis-server "${ARGS[@]}" + start-replica.sh: | + #!/bin/bash + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo 26379 + ;; + "REDIS") + echo 6379 + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + echo "${hostname}.${HEADLESS_SERVICE}" + } + + REDISPORT=$(get_port "$HOSTNAME" "REDIS") + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + + echo "" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + exec redis-server "${ARGS[@]}" +--- +# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: helm-redis-headless + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: zjyd +--- +# Source: outside-deploy/charts/redis-db/templates/master/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: helm-redis-master + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +spec: + type: ClusterIP + + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + nodePort: null + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: zjyd + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +--- +# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml +apiVersion: v1 +kind: Service 
+metadata: + name: helm-redis-replicas + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica +spec: + type: ClusterIP + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + nodePort: null + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: zjyd + app.kubernetes.io/component: replica +--- +# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-redis-master + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: zjyd + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master + serviceName: helm-redis-headless + updateStrategy: + rollingUpdate: { } + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master + annotations: + checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 + checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 + checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 + checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d + spec: + affinity: { } + securityContext: + fsGroup: 1001 + serviceAccountName: helm-redis + imagePullSecrets: + - name: harborsecret + terminationGracePeriodSeconds: 30 + containers: + - name: redis + image: 
192.168.10.3:8033/cmii/redis:6.2.6-debian-10-r0 + imagePullPolicy: "Always" + securityContext: + runAsUser: 1001 + command: + - /bin/bash + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: REDIS_REPLICATION_MODE + value: master + - name: ALLOW_EMPTY_PASSWORD + value: "no" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_TLS_ENABLED + value: "no" + - name: REDIS_PORT + value: "6379" + ports: + - name: redis + containerPort: 6379 + livenessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + # One second longer than command timeout should prevent generation of zombie processes. + timeoutSeconds: 6 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh 5 + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 2 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh 1 + resources: + limits: + cpu: "2" + memory: 8Gi + requests: + cpu: "100m" + memory: 1Gi + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + - name: redis-data + mountPath: /data + subPath: + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + - name: tmp + mountPath: /tmp + volumes: + - name: start-scripts + configMap: + name: helm-redis-scripts + defaultMode: 0755 + - name: health + configMap: + name: helm-redis-health + defaultMode: 0755 + - name: config + configMap: + name: helm-redis-configuration + - name: redis-tmp-conf + emptyDir: { } + - name: tmp + emptyDir: { } + - name: redis-data + emptyDir: { } +--- +# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-redis-replicas + namespace: zjyd + 
labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: zjyd + app.kubernetes.io/component: replica + serviceName: helm-redis-headless + updateStrategy: + rollingUpdate: { } + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica + annotations: + checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 + checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 + checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 + checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d + spec: + imagePullSecrets: + - name: harborsecret + securityContext: + fsGroup: 1001 + serviceAccountName: helm-redis + terminationGracePeriodSeconds: 30 + containers: + - name: redis + image: 192.168.10.3:8033/cmii/redis:6.2.6-debian-10-r0 + imagePullPolicy: "Always" + securityContext: + runAsUser: 1001 + command: + - /bin/bash + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-replica.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: helm-redis-master-0.helm-redis-headless.zjyd.svc.cluster.local + - name: REDIS_MASTER_PORT_NUMBER + value: "6379" + - name: ALLOW_EMPTY_PASSWORD + value: "no" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_TLS_ENABLED + value: "no" + - name: REDIS_PORT + value: "6379" + ports: + - name: 
redis + containerPort: 6379 + livenessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 6 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh 5 + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 2 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh 1 + resources: + limits: + cpu: "2" + memory: 8Gi + requests: + cpu: "100m" + memory: 1Gi + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + - name: redis-data + mountPath: /data + subPath: + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + volumes: + - name: start-scripts + configMap: + name: helm-redis-scripts + defaultMode: 0755 + - name: health + configMap: + name: helm-redis-health + defaultMode: 0755 + - name: config + configMap: + name: helm-redis-configuration + - name: redis-tmp-conf + emptyDir: { } + - name: redis-data + emptyDir: { } + diff --git a/agent-common/real_project/zjyd/k8s-srs.yaml b/agent-common/real_project/zjyd/k8s-srs.yaml new file mode 100644 index 0000000..0ecae1b --- /dev/null +++ b/agent-common/real_project/zjyd/k8s-srs.yaml @@ -0,0 +1,496 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: helm-live-srs-cm + namespace: zjyd + labels: + cmii.app: live-srs + cmii.type: live + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + helm.sh/chart: cmlc-live-srs-rtc-2.0.0 +data: + srs.rtc.conf: |- + listen 30935; + max_connections 4096; + srs_log_tank console; + srs_log_level info; + srs_log_file /home/srs.log; + daemon off; + http_api { + enabled on; + listen 1985; + crossdomain on; + } + stats { + network 0; + } + http_server { + enabled on; + listen 8080; + dir /home/hls; + } + srt_server { + enabled on; + listen 30556; + maxbw 1000000000; + 
connect_timeout 4000; + peerlatency 600; + recvlatency 600; + } + rtc_server { + enabled on; + listen 30090; + candidate $CANDIDATE; + } + vhost __defaultVhost__ { + http_hooks { + enabled on; + on_publish http://helm-live-op-svc-v2:8080/hooks/on_push; + } + http_remux { + enabled on; + } + rtc { + enabled on; + rtmp_to_rtc on; + rtc_to_rtmp on; + keep_bframe off; + } + tcp_nodelay on; + min_latency on; + play { + gop_cache off; + mw_latency 100; + mw_msgs 10; + } + publish { + firstpkt_timeout 8000; + normal_timeout 4000; + mr on; + } + dvr { + enabled off; + dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4; + dvr_plan session; + } + hls { + enabled on; + hls_path /home/hls; + hls_fragment 10; + hls_window 60; + hls_m3u8_file [app]/[stream].m3u8; + hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts; + hls_cleanup on; + hls_entry_prefix http://111.2.224.59:8088; + } + } +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-srs-svc-exporter + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - name: rtmp + protocol: TCP + port: 30935 + targetPort: 30935 + nodePort: 30935 + - name: rtc + protocol: UDP + port: 30090 + targetPort: 30090 + nodePort: 30090 + - name: rtc-tcp + protocol: TCP + port: 30090 + targetPort: 30090 + nodePort: 30090 + - name: srt + protocol: UDP + port: 30556 + targetPort: 30556 + nodePort: 30556 + - name: api + protocol: TCP + port: 1985 + targetPort: 1985 + nodePort: 30557 + selector: + srs-role: rtc + type: NodePort + sessionAffinity: None + externalTrafficPolicy: Cluster + +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-srs-svc + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 + - name: api + protocol: TCP + port: 1985 + targetPort: 1985 + selector: + srs-role: rtc + type: ClusterIP + sessionAffinity: None + +--- 
+kind: Service +apiVersion: v1 +metadata: + name: helm-live-srsrtc-svc + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - name: rtmp + protocol: TCP + port: 30935 + targetPort: 30935 + selector: + srs-role: rtc + type: ClusterIP + sessionAffinity: None + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: helm-live-srs-rtc + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + cmii.app: live-srs + cmii.type: live + helm.sh/chart: cmlc-live-srs-rtc-2.0.0 + srs-role: rtc +spec: + replicas: 1 + selector: + matchLabels: + srs-role: rtc + template: + metadata: + labels: + srs-role: rtc + spec: + volumes: + - name: srs-conf-file + configMap: + name: helm-live-srs-cm + items: + - key: srs.rtc.conf + path: docker.conf + defaultMode: 420 + - name: srs-vol + emptyDir: + sizeLimit: 8Gi + containers: + - name: srs-rtc + image: 192.168.10.3:8033/cmii/srs:v5.0.195 + ports: + - name: srs-rtmp + containerPort: 30935 + protocol: TCP + - name: srs-api + containerPort: 1985 + protocol: TCP + - name: srs-flv + containerPort: 8080 + protocol: TCP + - name: srs-webrtc + containerPort: 30090 + protocol: UDP + - name: srs-webrtc-tcp + containerPort: 30090 + protocol: TCP + - name: srs-srt + containerPort: 30556 + protocol: UDP + env: + - name: CANDIDATE + value: 111.2.224.59 + resources: + limits: + cpu: 2000m + memory: 4Gi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: srs-conf-file + mountPath: /usr/local/srs/conf/docker.conf + subPath: docker.conf + - name: srs-vol + mountPath: /home/dvr + subPath: zjyd/helm-live/dvr + - name: srs-vol + mountPath: /home/hls + subPath: zjyd/helm-live/hls + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + - name: oss-adaptor + image: 192.168.10.3:8033/cmii/cmii-srs-oss-adaptor:2023-SA + env: + - name: OSS_ENDPOINT + value: 'http://192.168.10.2:9000' + - name: OSS_AK + 
value: cmii + - name: OSS_SK + value: 'B#923fC7mk' + - name: OSS_BUCKET + value: live-cluster-hls + - name: SRS_OP + value: 'http://helm-live-op-svc-v2:8080' + - name: MYSQL_ENDPOINT + value: 'helm-mysql:3306' + - name: MYSQL_USERNAME + value: k8s_admin + - name: MYSQL_PASSWORD + value: fP#UaH6qQ3)8 + - name: MYSQL_DATABASE + value: cmii_live_srs_op + - name: MYSQL_TABLE + value: live_segment + - name: LOG_LEVEL + value: info + - name: OSS_META + value: 'yes' + resources: + limits: + cpu: 2000m + memory: 4Gi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: srs-vol + mountPath: /cmii/share/hls + subPath: zjyd/helm-live/hls + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: { } + imagePullSecrets: + - name: harborsecret + affinity: { } + schedulerName: default-scheduler + serviceName: helm-live-srsrtc-svc + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: 0 + revisionHistoryLimit: 10 +--- +# live-srs部分 +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: helm-live-op-v2 + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + cmii.app: live-engine + cmii.type: live + helm.sh/chart: cmlc-live-live-op-2.0.0 + live-role: op-v2 +spec: + replicas: 1 + selector: + matchLabels: + live-role: op-v2 + template: + metadata: + labels: + live-role: op-v2 + spec: + volumes: + - name: srs-conf-file + configMap: + name: helm-live-op-cm-v2 + items: + - key: live.op.conf + path: bootstrap.yaml + defaultMode: 420 + containers: + - name: helm-live-op-v2 + image: 192.168.10.3:8033/cmii/cmii-live-operator:5.2.0 + ports: + - name: operator + containerPort: 8080 + protocol: TCP + resources: + limits: + cpu: 4800m + memory: 4Gi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: srs-conf-file + mountPath: 
/cmii/bootstrap.yaml + subPath: bootstrap.yaml + livenessProbe: + httpGet: + path: /cmii/health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: { } + imagePullSecrets: + - name: harborsecret + affinity: { } + schedulerName: default-scheduler + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 25% + maxSurge: 25% + revisionHistoryLimit: 10 + progressDeadlineSeconds: 600 +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-op-svc-v2 + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 + nodePort: 30333 + selector: + live-role: op-v2 + type: NodePort + sessionAffinity: None +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-op-svc + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 + selector: + live-role: op + type: ClusterIP + sessionAffinity: None +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: helm-live-op-cm-v2 + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + cmii.app: live-engine + cmii.type: live +data: + live.op.conf: |- + server: + port: 8080 + spring: + main: + allow-bean-definition-overriding: true + allow-circular-references: true + application: + name: cmii-live-operator + platform: + info: + name: cmii-live-operator + description: cmii-live-operator + version: 5.7.0 + 
scanPackage: com.cmii.live.op + cloud: + nacos: + config: + username: developer + password: N@cos14Good + server-addr: helm-nacos:8848 + extension-configs: + - data-id: cmii-live-operator.yml + group: 5.7.0 + refresh: true + shared-configs: + - data-id: cmii-backend-system.yml + group: 5.7.0 + refresh: true + discovery: + enabled: false + + live: + engine: + type: srs + endpoint: 'http://helm-live-srs-svc:1985' + proto: + rtmp: 'rtmp://111.2.224.59:30935' + rtsp: 'rtsp://111.2.224.59:30554' + srt: 'srt://111.2.224.59:30556' + flv: 'http://111.2.224.59:30500' + hls: 'http://111.2.224.59:30500' + rtc: 'webrtc://111.2.224.59:30090' + replay: 'https://111.2.224.59:30333' + minio: + endpoint: http://192.168.10.2:9000 + access-key: cmii + secret-key: B#923fC7mk + bucket: live-cluster-hls diff --git a/agent-operator/deploy/z_bjtg/k8s-backend.yaml b/agent-common/real_project/zjyd/old/k8s-backend.yaml old mode 100755 new mode 100644 similarity index 79% rename from agent-operator/deploy/z_bjtg/k8s-backend.yaml rename to agent-common/real_project/zjyd/old/k8s-backend.yaml index ce4b99e..9809600 --- a/agent-operator/deploy/z_bjtg/k8s-backend.yaml +++ b/agent-common/real_project/zjyd/old/k8s-backend.yaml @@ -1,2517 +1,15 @@ --- apiVersion: apps/v1 kind: Deployment -metadata: - name: cmii-uav-grid-manage - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-grid-manage - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-grid-manage - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-grid-manage - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - 
name: cmii-uav-grid-manage - image: 10.250.0.110:8033/cmii/cmii-uav-grid-manage:5.1.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-grid-manage - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-grid-manage - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-grid-manage - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-grid-manage - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: 
cmii-uav-grid-manage - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-integration - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-integration - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-integration - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-integration - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-integration - image: 10.250.0.110:8033/cmii/cmii-uav-integration:5.5.0-0507 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-integration - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - 
initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-integration - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-integration - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-integration - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-integration - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-admin-user - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-admin-user - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-admin-user - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-admin-user - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-admin-user - image: 10.250.0.110:8033/cmii/cmii-admin-user:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-admin-user - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m 
-Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-admin-user - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-admin-user - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-admin-user - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-admin-user - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-cms - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-cms - octopus/control: backend-app-1.0.0 - 
app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-cms - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-cms - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-cms - image: 10.250.0.110:8033/cmii/cmii-uav-cms:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-cms - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - 
mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-cms - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-cms - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-cms - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-cms - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-gateway - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-gateway - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-gateway - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-gateway - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-gateway - image: 10.250.0.110:8033/cmii/cmii-uav-gateway:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-gateway - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: 
NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-gateway - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-gateway - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-gateway - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-gateway - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-logger - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-logger - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-logger - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-logger - spec: - affinity: - nodeAffinity: - 
requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-logger - image: 10.250.0.110:8033/cmii/cmii-uav-logger:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-logger - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-logger - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-logger - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-logger - 
octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-logger - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-mission - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-mission - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-mission - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-mission - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-mission - image: 10.250.0.110:8033/cmii/cmii-uav-mission:5.5.0-042901 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-mission - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - 
timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-mission - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-mission - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-mission - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-mission - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-notice - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-notice - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-notice - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-notice - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-notice - image: 10.250.0.110:8033/cmii/cmii-uav-notice:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - 
value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-notice - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-notice - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-notice - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-notice - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-notice - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: 
cmii-uav-cloud-live - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-cloud-live - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-cloud-live - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-cloud-live - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-cloud-live - image: 10.250.0.110:8033/cmii/cmii-uav-cloud-live:5.5.0-042401 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-cloud-live - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port 
- scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-cloud-live - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-cloud-live - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-cloud-live - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-cloud-live - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-data-post-process - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-data-post-process - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-data-post-process - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-data-post-process - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-data-post-process - image: 10.250.0.110:8033/cmii/cmii-uav-data-post-process:5.5.0-042501 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-data-post-process - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - 
valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-data-post-process - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-data-post-process - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-data-post-process - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-data-post-process - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-grid-datasource - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-grid-datasource - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - 
app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-grid-datasource - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-grid-datasource - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-grid-datasource - image: 10.250.0.110:8033/cmii/cmii-uav-grid-datasource:5.2.0-24810 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-grid-datasource - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: 
nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-grid-datasource - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-grid-datasource - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-grid-datasource - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-grid-datasource - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-mqtthandler - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-mqtthandler - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-mqtthandler - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-mqtthandler - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-mqtthandler - image: 10.250.0.110:8033/cmii/cmii-uav-mqtthandler:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-mqtthandler - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - 
name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-mqtthandler - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-mqtthandler - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-mqtthandler - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-mqtthandler - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-threedsimulation - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-threedsimulation - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-threedsimulation - template: - metadata: - 
labels: - cmii.type: backend - cmii.app: cmii-uav-threedsimulation - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-threedsimulation - image: 10.250.0.110:8033/cmii/cmii-uav-threedsimulation:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-threedsimulation - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-threedsimulation - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- 
-apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-threedsimulation - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-threedsimulation - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-threedsimulation - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-tower - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-tower - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-tower - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-tower - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-tower - image: 10.250.0.110:8033/cmii/cmii-uav-tower:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-tower - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - 
requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-tower - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-tower - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-tower - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-tower - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-waypoint - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-waypoint - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-waypoint - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-waypoint - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - 
containers: - - name: cmii-uav-waypoint - image: 10.250.0.110:8033/cmii/cmii-uav-waypoint:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-waypoint - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-waypoint - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-waypoint - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-waypoint - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: 
cmii-uav-waypoint - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-suav-supervision - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-suav-supervision - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-suav-supervision - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-suav-supervision - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-suav-supervision - image: 10.250.0.110:8033/cmii/cmii-suav-supervision:5.4.0-032501 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-suav-supervision - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP 
- initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-suav-supervision - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-suav-supervision - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-suav-supervision - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-suav-supervision - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-alarm - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-alarm - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-alarm - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-alarm - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-alarm - image: 10.250.0.110:8033/cmii/cmii-uav-alarm:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-alarm - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m 
-Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-alarm - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-alarm - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-alarm - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-alarm - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-gis-server - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-gis-server - octopus/control: 
backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-gis-server - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-gis-server - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-gis-server - image: 10.250.0.110:8033/cmii/cmii-uav-gis-server:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-gis-server - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - 
volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-gis-server - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-gis-server - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-gis-server - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-gis-server - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: cmii-admin-data - namespace: bjtg + namespace: zjyd labels: cmii.type: backend cmii.app: cmii-admin-data octopus/control: backend-app-1.0.0 app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: replicas: 1 strategy: @@ -2535,16 +33,16 @@ spec: - key: uavcloud.env operator: In values: - - demo + - imagePullSecrets: - name: harborsecret containers: - name: cmii-admin-data - image: 10.250.0.110:8033/cmii/cmii-admin-data:5.5.0 + image: 192.168.10.3:8033/cmii/cmii-admin-data:5.7.0 imagePullPolicy: Always env: - name: K8S_NAMESPACE - value: bjtg + value: zjyd - name: APPLICATION_NAME value: cmii-admin-data - name: CUST_JAVA_OPTS @@ -2558,11 +56,11 @@ spec: - name: NACOS_DISCOVERY_PORT value: "8080" - name: BIZ_CONFIG_GROUP - value: 5.5.0 + value: 5.7.0 - name: SYS_CONFIG_GROUP - value: 5.5.0 + value: 5.7.0 - name: IMAGE_VERSION - value: 5.5.0 + value: 5.7.0 - name: NACOS_USERNAME value: "developer" - name: NACOS_PASSWORD @@ -2580,7 +78,7 @@ spec: cpu: 200m livenessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -2590,7 +88,7 @@ spec: failureThreshold: 3 readinessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: 
pod-port scheme: HTTP initialDelaySeconds: 60 @@ -2600,7 +98,7 @@ spec: failureThreshold: 3 startupProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -2612,7 +110,7 @@ spec: - name: nfs-backend-log-volume mountPath: /cmii/logs readOnly: false - subPath: bjtg/cmii-admin-data + subPath: zjyd/cmii-admin-data volumes: - name: nfs-backend-log-volume persistentVolumeClaim: @@ -2622,13 +120,13 @@ apiVersion: v1 kind: Service metadata: name: cmii-admin-data - namespace: bjtg + namespace: zjyd labels: cmii.type: backend cmii.app: cmii-admin-data octopus/control: backend-app-1.0.0 app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: type: ClusterIP selector: @@ -2643,28 +141,28 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - name: cmii-uav-developer - namespace: bjtg + name: cmii-uas-gateway + namespace: zjyd labels: cmii.type: backend - cmii.app: cmii-uav-developer + cmii.app: cmii-uas-gateway octopus/control: backend-app-1.0.0 app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: - replicas: 1 + replicas: 0 strategy: rollingUpdate: maxUnavailable: 1 selector: matchLabels: cmii.type: backend - cmii.app: cmii-uav-developer + cmii.app: cmii-uas-gateway template: metadata: labels: cmii.type: backend - cmii.app: cmii-uav-developer + cmii.app: cmii-uas-gateway spec: affinity: nodeAffinity: @@ -2674,18 +172,18 @@ spec: - key: uavcloud.env operator: In values: - - demo + - imagePullSecrets: - name: harborsecret containers: - - name: cmii-uav-developer - image: 10.250.0.110:8033/cmii/cmii-uav-developer:5.5.0 + - name: cmii-uas-gateway + image: 192.168.10.3:8033/cmii/cmii-uas-gateway:5.6.0 imagePullPolicy: Always env: - name: K8S_NAMESPACE - value: bjtg + value: zjyd - name: APPLICATION_NAME - value: cmii-uav-developer + value: cmii-uas-gateway - name: CUST_JAVA_OPTS value: 
"-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - name: NACOS_REGISTRY @@ -2697,11 +195,11 @@ spec: - name: NACOS_DISCOVERY_PORT value: "8080" - name: BIZ_CONFIG_GROUP - value: 5.5.0 + value: 5.7.0 - name: SYS_CONFIG_GROUP - value: 5.5.0 + value: 5.7.0 - name: IMAGE_VERSION - value: 5.5.0 + value: 5.7.0 - name: NACOS_USERNAME value: "developer" - name: NACOS_PASSWORD @@ -2719,7 +217,7 @@ spec: cpu: 200m livenessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -2729,7 +227,7 @@ spec: failureThreshold: 3 readinessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -2739,7 +237,7 @@ spec: failureThreshold: 3 startupProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -2751,7 +249,7 @@ spec: - name: nfs-backend-log-volume mountPath: /cmii/logs readOnly: false - subPath: bjtg/cmii-uav-developer + subPath: zjyd/cmii-uas-gateway volumes: - name: nfs-backend-log-volume persistentVolumeClaim: @@ -2760,19 +258,158 @@ spec: apiVersion: v1 kind: Service metadata: - name: cmii-uav-developer - namespace: bjtg + name: cmii-uas-gateway + namespace: zjyd labels: cmii.type: backend - cmii.app: cmii-uav-developer + cmii.app: cmii-uas-gateway octopus/control: backend-app-1.0.0 app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: type: ClusterIP selector: cmii.type: backend - cmii.app: cmii-uav-developer + cmii.app: cmii-uas-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-autowaypoint + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + 
rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-autowaypoint + image: 192.168.10.3:8033/cmii/cmii-uav-autowaypoint:4.2.0-beta + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-autowaypoint + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: 
zjyd/cmii-uav-autowaypoint + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-autowaypoint + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-autowaypoint ports: - name: backend-tcp port: 8080 @@ -2783,15 +420,15 @@ apiVersion: apps/v1 kind: Deployment metadata: name: cmii-uav-multilink - namespace: bjtg + namespace: zjyd labels: cmii.type: backend cmii.app: cmii-uav-multilink octopus/control: backend-app-1.0.0 app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: - replicas: 1 + replicas: 0 strategy: rollingUpdate: maxUnavailable: 1 @@ -2813,16 +450,16 @@ spec: - key: uavcloud.env operator: In values: - - demo + - imagePullSecrets: - name: harborsecret containers: - name: cmii-uav-multilink - image: 10.250.0.110:8033/cmii/cmii-uav-multilink:5.5.0 + image: 192.168.10.3:8033/cmii/cmii-uav-multilink:5.5.0 imagePullPolicy: Always env: - name: K8S_NAMESPACE - value: bjtg + value: zjyd - name: APPLICATION_NAME value: cmii-uav-multilink - name: CUST_JAVA_OPTS @@ -2836,11 +473,11 @@ spec: - name: NACOS_DISCOVERY_PORT value: "8080" - name: BIZ_CONFIG_GROUP - value: 5.5.0 + value: 5.7.0 - name: SYS_CONFIG_GROUP - value: 5.5.0 + value: 5.7.0 - name: IMAGE_VERSION - value: 5.5.0 + value: 5.7.0 - name: NACOS_USERNAME value: "developer" - name: NACOS_PASSWORD @@ -2858,7 +495,7 @@ spec: cpu: 200m livenessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -2868,7 +505,7 @@ spec: failureThreshold: 3 readinessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ 
-2878,7 +515,7 @@ spec: failureThreshold: 3 startupProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -2890,7 +527,7 @@ spec: - name: nfs-backend-log-volume mountPath: /cmii/logs readOnly: false - subPath: bjtg/cmii-uav-multilink + subPath: zjyd/cmii-uav-multilink volumes: - name: nfs-backend-log-volume persistentVolumeClaim: @@ -2900,13 +537,13 @@ apiVersion: v1 kind: Service metadata: name: cmii-uav-multilink - namespace: bjtg + namespace: zjyd labels: cmii.type: backend cmii.app: cmii-uav-multilink octopus/control: backend-app-1.0.0 app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: type: ClusterIP selector: @@ -2920,1683 +557,15 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - name: cmii-uav-material-warehouse - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-material-warehouse - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-material-warehouse - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-material-warehouse - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-material-warehouse - image: 10.250.0.110:8033/cmii/cmii-uav-material-warehouse:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-material-warehouse - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - 
fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-material-warehouse - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-material-warehouse - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-material-warehouse - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-material-warehouse - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-oauth - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-oauth - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 
-spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-oauth - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-oauth - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-oauth - image: 10.250.0.110:8033/cmii/cmii-uav-oauth:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-oauth - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-oauth - 
volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-oauth - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-oauth - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-oauth - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-process - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-process - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-process - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-process - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-process - image: 10.250.0.110:8033/cmii/cmii-uav-process:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-process - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - 
containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-process - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-process - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-process - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-process - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-admin-gateway - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-admin-gateway - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-admin-gateway - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-admin-gateway - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: 
uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-admin-gateway - image: 10.250.0.110:8033/cmii/cmii-admin-gateway:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-admin-gateway - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-admin-gateway - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-admin-gateway - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-admin-gateway - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - 
app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-admin-gateway - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-grid-engine - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-grid-engine - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-grid-engine - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-grid-engine - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-grid-engine - image: 10.250.0.110:8033/cmii/cmii-uav-grid-engine:5.1.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-grid-engine - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - 
failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-grid-engine - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-grid-engine - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-grid-engine - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-grid-engine - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-industrial-portfolio - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-industrial-portfolio - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-industrial-portfolio - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-industrial-portfolio - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-industrial-portfolio - image: 10.250.0.110:8033/cmii/cmii-uav-industrial-portfolio:5.5.0-050801 - 
imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-industrial-portfolio - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-industrial-portfolio - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-industrial-portfolio - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-industrial-portfolio - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-industrial-portfolio - ports: - - name: 
backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-device - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-device - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-device - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-device - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-device - image: 10.250.0.110:8033/cmii/cmii-uav-device:5.5.0-042301 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-device - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - 
successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-device - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-device - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-device - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-device - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-app-release - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-app-release - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-app-release - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-app-release - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-app-release - image: 10.250.0.110:8033/cmii/cmii-app-release:4.2.0-validation - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-app-release - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: 
NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-app-release - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-app-release - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-app-release - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-app-release - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-open-gateway - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-open-gateway - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - 
replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-open-gateway - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-open-gateway - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-open-gateway - image: 10.250.0.110:8033/cmii/cmii-open-gateway:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-open-gateway - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: 
bjtg/cmii-open-gateway - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-open-gateway - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-open-gateway - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-open-gateway - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-brain - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-brain - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-brain - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-brain - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-brain - image: 10.250.0.110:8033/cmii/cmii-uav-brain:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-brain - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - 
name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-brain - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-brain - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-brain - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-brain - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-kpi-monitor - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-kpi-monitor - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-kpi-monitor - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-kpi-monitor - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - 
matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-kpi-monitor - image: 10.250.0.110:8033/cmii/cmii-uav-kpi-monitor:5.5.0 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-kpi-monitor - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-kpi-monitor - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-kpi-monitor - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-kpi-monitor - octopus/control: backend-app-1.0.0 - 
app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-kpi-monitor - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cmii-uav-surveillance - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-surveillance - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - replicas: 1 - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: backend - cmii.app: cmii-uav-surveillance - template: - metadata: - labels: - cmii.type: backend - cmii.app: cmii-uav-surveillance - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: uavcloud.env - operator: In - values: - - demo - imagePullSecrets: - - name: harborsecret - containers: - - name: cmii-uav-surveillance - image: 10.250.0.110:8033/cmii/cmii-uav-surveillance:5.5.0-042901 - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - value: bjtg - - name: APPLICATION_NAME - value: cmii-uav-surveillance - - name: CUST_JAVA_OPTS - value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - - name: NACOS_REGISTRY - value: "helm-nacos:8848" - - name: NACOS_DISCOVERY_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NACOS_DISCOVERY_PORT - value: "8080" - - name: BIZ_CONFIG_GROUP - value: 5.5.0 - - name: SYS_CONFIG_GROUP - value: 5.5.0 - - name: IMAGE_VERSION - value: 5.5.0 - - name: NACOS_USERNAME - value: "developer" - - name: NACOS_PASSWORD - value: "Deve@9128201" - ports: - - name: pod-port - containerPort: 8080 - protocol: TCP - resources: - limits: - memory: 2Gi - cpu: "2" - requests: - memory: 200Mi - cpu: 200m - livenessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - 
timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /cmii/ping - port: pod-port - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 3 - periodSeconds: 20 - successThreshold: 1 - failureThreshold: 5 - volumeMounts: - - name: nfs-backend-log-volume - mountPath: /cmii/logs - readOnly: false - subPath: bjtg/cmii-uav-surveillance - volumes: - - name: nfs-backend-log-volume - persistentVolumeClaim: - claimName: nfs-backend-log-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: cmii-uav-surveillance - namespace: bjtg - labels: - cmii.type: backend - cmii.app: cmii-uav-surveillance - octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.type: backend - cmii.app: cmii-uav-surveillance - ports: - - name: backend-tcp - port: 8080 - protocol: TCP - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: cmii-uav-user - namespace: bjtg + namespace: zjyd labels: cmii.type: backend cmii.app: cmii-uav-user octopus/control: backend-app-1.0.0 app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: replicas: 1 strategy: @@ -4620,16 +589,16 @@ spec: - key: uavcloud.env operator: In values: - - demo + - imagePullSecrets: - name: harborsecret containers: - name: cmii-uav-user - image: 10.250.0.110:8033/cmii/cmii-uav-user:5.5.0-042801 + image: 192.168.10.3:8033/cmii/cmii-uav-user:5.7.0 imagePullPolicy: Always env: - name: K8S_NAMESPACE - value: bjtg + value: zjyd - name: APPLICATION_NAME value: cmii-uav-user - name: CUST_JAVA_OPTS @@ -4643,11 +612,11 @@ spec: - name: NACOS_DISCOVERY_PORT value: "8080" - name: BIZ_CONFIG_GROUP - value: 
5.5.0 + value: 5.7.0 - name: SYS_CONFIG_GROUP - value: 5.5.0 + value: 5.7.0 - name: IMAGE_VERSION - value: 5.5.0 + value: 5.7.0 - name: NACOS_USERNAME value: "developer" - name: NACOS_PASSWORD @@ -4665,7 +634,7 @@ spec: cpu: 200m livenessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -4675,7 +644,7 @@ spec: failureThreshold: 3 readinessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -4685,7 +654,7 @@ spec: failureThreshold: 3 startupProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -4697,7 +666,7 @@ spec: - name: nfs-backend-log-volume mountPath: /cmii/logs readOnly: false - subPath: bjtg/cmii-uav-user + subPath: zjyd/cmii-uav-user volumes: - name: nfs-backend-log-volume persistentVolumeClaim: @@ -4707,13 +676,13 @@ apiVersion: v1 kind: Service metadata: name: cmii-uav-user - namespace: bjtg + namespace: zjyd labels: cmii.type: backend cmii.app: cmii-uav-user octopus/control: backend-app-1.0.0 app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: type: ClusterIP selector: @@ -4728,28 +697,28 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - name: cmii-uav-airspace - namespace: bjtg + name: cmii-uav-grid-datasource + namespace: zjyd labels: cmii.type: backend - cmii.app: cmii-uav-airspace + cmii.app: cmii-uav-grid-datasource octopus/control: backend-app-1.0.0 app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: - replicas: 1 + replicas: 0 strategy: rollingUpdate: maxUnavailable: 1 selector: matchLabels: cmii.type: backend - cmii.app: cmii-uav-airspace + cmii.app: cmii-uav-grid-datasource template: metadata: labels: cmii.type: backend - cmii.app: cmii-uav-airspace + cmii.app: cmii-uav-grid-datasource spec: affinity: nodeAffinity: @@ 
-4759,18 +728,18 @@ spec: - key: uavcloud.env operator: In values: - - demo + - imagePullSecrets: - name: harborsecret containers: - - name: cmii-uav-airspace - image: 10.250.0.110:8033/cmii/cmii-uav-airspace:5.5.0-042401 + - name: cmii-uav-grid-datasource + image: 192.168.10.3:8033/cmii/cmii-uav-grid-datasource:5.2.0-24810 imagePullPolicy: Always env: - name: K8S_NAMESPACE - value: bjtg + value: zjyd - name: APPLICATION_NAME - value: cmii-uav-airspace + value: cmii-uav-grid-datasource - name: CUST_JAVA_OPTS value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - name: NACOS_REGISTRY @@ -4782,11 +751,11 @@ spec: - name: NACOS_DISCOVERY_PORT value: "8080" - name: BIZ_CONFIG_GROUP - value: 5.5.0 + value: 5.7.0 - name: SYS_CONFIG_GROUP - value: 5.5.0 + value: 5.7.0 - name: IMAGE_VERSION - value: 5.5.0 + value: 5.7.0 - name: NACOS_USERNAME value: "developer" - name: NACOS_PASSWORD @@ -4804,7 +773,7 @@ spec: cpu: 200m livenessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -4814,7 +783,7 @@ spec: failureThreshold: 3 readinessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -4824,7 +793,7 @@ spec: failureThreshold: 3 startupProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -4836,7 +805,7 @@ spec: - name: nfs-backend-log-volume mountPath: /cmii/logs readOnly: false - subPath: bjtg/cmii-uav-airspace + subPath: zjyd/cmii-uav-grid-datasource volumes: - name: nfs-backend-log-volume persistentVolumeClaim: @@ -4845,19 +814,19 @@ spec: apiVersion: v1 kind: Service metadata: - name: cmii-uav-airspace - namespace: bjtg + name: cmii-uav-grid-datasource + namespace: zjyd labels: cmii.type: backend - cmii.app: cmii-uav-airspace + cmii.app: cmii-uav-grid-datasource octopus/control: backend-app-1.0.0 app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 + 
app.kubernetes.io/app-version: 5.7.0 spec: type: ClusterIP selector: cmii.type: backend - cmii.app: cmii-uav-airspace + cmii.app: cmii-uav-grid-datasource ports: - name: backend-tcp port: 8080 @@ -4867,28 +836,28 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - name: cmii-uav-autowaypoint - namespace: bjtg + name: cmii-uav-alarm + namespace: zjyd labels: cmii.type: backend - cmii.app: cmii-uav-autowaypoint + cmii.app: cmii-uav-alarm octopus/control: backend-app-1.0.0 app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: - replicas: 1 + replicas: 0 strategy: rollingUpdate: maxUnavailable: 1 selector: matchLabels: cmii.type: backend - cmii.app: cmii-uav-autowaypoint + cmii.app: cmii-uav-alarm template: metadata: labels: cmii.type: backend - cmii.app: cmii-uav-autowaypoint + cmii.app: cmii-uav-alarm spec: affinity: nodeAffinity: @@ -4898,18 +867,18 @@ spec: - key: uavcloud.env operator: In values: - - demo + - imagePullSecrets: - name: harborsecret containers: - - name: cmii-uav-autowaypoint - image: 10.250.0.110:8033/cmii/cmii-uav-autowaypoint:4.2.0-beta + - name: cmii-uav-alarm + image: 192.168.10.3:8033/cmii/cmii-uav-alarm:5.7.0 imagePullPolicy: Always env: - name: K8S_NAMESPACE - value: bjtg + value: zjyd - name: APPLICATION_NAME - value: cmii-uav-autowaypoint + value: cmii-uav-alarm - name: CUST_JAVA_OPTS value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" - name: NACOS_REGISTRY @@ -4921,11 +890,11 @@ spec: - name: NACOS_DISCOVERY_PORT value: "8080" - name: BIZ_CONFIG_GROUP - value: 5.5.0 + value: 5.7.0 - name: SYS_CONFIG_GROUP - value: 5.5.0 + value: 5.7.0 - name: IMAGE_VERSION - value: 5.5.0 + value: 5.7.0 - name: NACOS_USERNAME value: "developer" - name: NACOS_PASSWORD @@ -4943,7 +912,7 @@ spec: cpu: 200m livenessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -4953,7 +922,7 @@ spec: failureThreshold: 3 
readinessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -4963,7 +932,7 @@ spec: failureThreshold: 3 startupProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -4975,7 +944,7 @@ spec: - name: nfs-backend-log-volume mountPath: /cmii/logs readOnly: false - subPath: bjtg/cmii-uav-autowaypoint + subPath: zjyd/cmii-uav-alarm volumes: - name: nfs-backend-log-volume persistentVolumeClaim: @@ -4984,19 +953,992 @@ spec: apiVersion: v1 kind: Service metadata: - name: cmii-uav-autowaypoint - namespace: bjtg + name: cmii-uav-alarm + namespace: zjyd labels: cmii.type: backend - cmii.app: cmii-uav-autowaypoint + cmii.app: cmii-uav-alarm octopus/control: backend-app-1.0.0 app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: type: ClusterIP selector: cmii.type: backend - cmii.app: cmii-uav-autowaypoint + cmii.app: cmii-uav-alarm + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-surveillance + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-surveillance + image: 192.168.10.3:8033/cmii/cmii-uav-surveillance:5.7.0 + imagePullPolicy: Always + env: + - name: 
K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-surveillance + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-surveillance + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-surveillance + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-surveillance + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-surveillance + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: 
apps/v1 +kind: Deployment +metadata: + name: cmii-uav-grid-manage + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-grid-manage + image: 192.168.10.3:8033/cmii/cmii-uav-grid-manage:5.1.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-grid-manage + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + 
httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-grid-manage + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-grid-manage + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-grid-manage + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-integration + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-integration + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-integration + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-integration + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-integration + image: 192.168.10.3:8033/cmii/cmii-uav-integration:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-integration + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + 
valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-integration + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-integration + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-integration + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-integration + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-material-warehouse + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + 
app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-material-warehouse + image: 192.168.10.3:8033/cmii/cmii-uav-material-warehouse:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-material-warehouse + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - 
name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-material-warehouse + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-material-warehouse + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-material-warehouse + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-notice + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-notice + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-notice + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-notice + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-notice + image: 192.168.10.3:8033/cmii/cmii-uav-notice:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-notice + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + 
value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-notice + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-notice + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-notice + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-notice + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-mqtthandler + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + template: + metadata: + labels: + cmii.type: backend + cmii.app: 
cmii-uav-mqtthandler + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-mqtthandler + image: 192.168.10.3:8033/cmii/cmii-uav-mqtthandler:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-mqtthandler + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-mqtthandler + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-mqtthandler + 
namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-mqtthandler + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-process + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-process + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-process + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-process + image: 192.168.10.3:8033/cmii/cmii-uav-process:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-process + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: 
/cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-process + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-process + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-process ports: - name: backend-tcp port: 8080 @@ -5007,13 +1949,13 @@ apiVersion: apps/v1 kind: Deployment metadata: name: cmii-uav-emergency - namespace: bjtg + namespace: zjyd labels: cmii.type: backend cmii.app: cmii-uav-emergency octopus/control: backend-app-1.0.0 app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: replicas: 1 strategy: @@ -5037,16 +1979,16 @@ spec: - key: uavcloud.env operator: In values: - - demo + - imagePullSecrets: - name: harborsecret containers: - name: cmii-uav-emergency - image: 10.250.0.110:8033/cmii/cmii-uav-emergency:5.3.0 + image: 192.168.10.3:8033/cmii/cmii-uav-emergency:5.7.0 imagePullPolicy: Always env: - name: K8S_NAMESPACE - value: bjtg + value: zjyd - name: APPLICATION_NAME value: cmii-uav-emergency - name: CUST_JAVA_OPTS @@ -5060,11 +2002,11 @@ spec: - name: 
NACOS_DISCOVERY_PORT value: "8080" - name: BIZ_CONFIG_GROUP - value: 5.5.0 + value: 5.7.0 - name: SYS_CONFIG_GROUP - value: 5.5.0 + value: 5.7.0 - name: IMAGE_VERSION - value: 5.5.0 + value: 5.7.0 - name: NACOS_USERNAME value: "developer" - name: NACOS_PASSWORD @@ -5082,7 +2024,7 @@ spec: cpu: 200m livenessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -5092,7 +2034,7 @@ spec: failureThreshold: 3 readinessProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -5102,7 +2044,7 @@ spec: failureThreshold: 3 startupProbe: httpGet: - path: /cmii/ping + path: /cmii/health port: pod-port scheme: HTTP initialDelaySeconds: 60 @@ -5114,7 +2056,7 @@ spec: - name: nfs-backend-log-volume mountPath: /cmii/logs readOnly: false - subPath: bjtg/cmii-uav-emergency + subPath: zjyd/cmii-uav-emergency volumes: - name: nfs-backend-log-volume persistentVolumeClaim: @@ -5124,13 +2066,13 @@ apiVersion: v1 kind: Service metadata: name: cmii-uav-emergency - namespace: bjtg + namespace: zjyd labels: cmii.type: backend cmii.app: cmii-uav-emergency octopus/control: backend-app-1.0.0 app.kubernetes.io/managed-by: octopus - app.kubernetes.io/app-version: 5.5.0 + app.kubernetes.io/app-version: 5.7.0 spec: type: ClusterIP selector: @@ -5141,3 +2083,3617 @@ spec: port: 8080 protocol: TCP targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-app-release + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-app-release + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-app-release + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-app-release + spec: + affinity: + nodeAffinity: + 
requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-app-release + image: 192.168.10.3:8033/cmii/cmii-app-release:4.2.0-validation + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-app-release + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-app-release + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-app-release + namespace: zjyd + labels: + cmii.type: backend + cmii.app: 
cmii-app-release + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-app-release + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-industrial-portfolio + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-industrial-portfolio + image: 192.168.10.3:8033/cmii/cmii-uav-industrial-portfolio:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-industrial-portfolio + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + 
httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-industrial-portfolio + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-industrial-portfolio + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-industrial-portfolio + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-waypoint + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + 
containers: + - name: cmii-uav-waypoint + image: 192.168.10.3:8033/cmii/cmii-uav-waypoint:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-waypoint + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-waypoint + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-waypoint + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-waypoint + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: 
cmii-uav-waypoint + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-cms + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-cms + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-cms + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-cms + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-cms + image: 192.168.10.3:8033/cmii/cmii-uav-cms:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-cms + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + 
successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-cms + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-cms + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-cms + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-cms + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-logger + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-logger + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-logger + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-logger + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-logger + image: 192.168.10.3:8033/cmii/cmii-uav-logger:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-logger + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + 
fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-logger + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-logger + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-logger + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-logger + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-mission + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-mission + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + 
rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-mission + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-mission + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-mission + image: 192.168.10.3:8033/cmii/cmii-uav-mission:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-mission + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-mission + volumes: + - name: 
nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-mission + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-mission + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-mission + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-gateway + image: 192.168.10.3:8033/cmii/cmii-uav-gateway:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + 
protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-oauth + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-oauth + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-oauth + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-oauth + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + 
values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-oauth + image: 192.168.10.3:8033/cmii/cmii-uav-oauth:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-oauth + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-oauth + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-oauth + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-oauth + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + 
selector: + cmii.type: backend + cmii.app: cmii-uav-oauth + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-suav-supervision + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-suav-supervision + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-suav-supervision + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-suav-supervision + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-suav-supervision + image: 192.168.10.3:8033/cmii/cmii-suav-supervision:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-suav-supervision + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health 
+ port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-suav-supervision + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-suav-supervision + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-suav-supervision + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-suav-supervision + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uas-lifecycle + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uas-lifecycle + image: 192.168.10.3:8033/cmii/cmii-uas-lifecycle:5.6.0-30403-071802 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uas-lifecycle + - name: 
CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uas-lifecycle + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uas-lifecycle + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uas-lifecycle + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-gis-server + namespace: zjyd + labels: + cmii.type: backend 
+ cmii.app: cmii-uav-gis-server + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-gis-server + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-gis-server + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-gis-server + image: 192.168.10.3:8033/cmii/cmii-uav-gis-server:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-gis-server + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + 
periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-gis-server + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-gis-server + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-gis-server + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-gis-server + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-admin-user + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-admin-user + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-admin-user + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-admin-user + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-admin-user + image: 192.168.10.3:8033/cmii/cmii-admin-user:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-admin-user + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: 
SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-admin-user + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-admin-user + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-admin-user + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-admin-user + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-open-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-open-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-open-gateway + template: + metadata: + 
labels: + cmii.type: backend + cmii.app: cmii-open-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-open-gateway + image: 192.168.10.3:8033/cmii/cmii-open-gateway:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-open-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-open-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: 
cmii-open-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-open-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-open-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-threedsimulation + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-threedsimulation + image: 192.168.10.3:8033/cmii/cmii-uav-threedsimulation:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-threedsimulation + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + 
requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-threedsimulation + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-threedsimulation + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-threedsimulation + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-cloud-live + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + 
- + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-cloud-live + image: 192.168.10.3:8033/cmii/cmii-uav-cloud-live:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-cloud-live + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-cloud-live + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-cloud-live + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + 
type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-cloud-live + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-admin-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-admin-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-admin-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-admin-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-admin-gateway + image: 192.168.10.3:8033/cmii/cmii-admin-gateway:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-admin-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: 
/cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-admin-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-admin-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-admin-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-admin-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-brain + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-brain + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-brain + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-brain + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-brain + image: 192.168.10.3:8033/cmii/cmii-uav-brain:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-brain + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m 
-Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-brain + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-brain + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-brain + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-brain + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-tower + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-tower + octopus/control: backend-app-1.0.0 
+ app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-tower + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-tower + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-tower + image: 192.168.10.3:8033/cmii/cmii-uav-tower:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-tower + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: 
nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-tower + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-tower + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-tower + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-tower + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-sense-adapter + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-sense-adapter + image: 192.168.10.3:8033/cmii/cmii-uav-sense-adapter:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-sense-adapter + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 
5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-sense-adapter + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-sense-adapter + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-sense-adapter + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-airspace + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-airspace + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-airspace + template: + metadata: + labels: + cmii.type: backend + 
cmii.app: cmii-uav-airspace + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-airspace + image: 192.168.10.3:8033/cmii/cmii-uav-airspace:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-airspace + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-airspace + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-airspace + namespace: 
zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-airspace + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-airspace + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-grid-engine + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-grid-engine + image: 192.168.10.3:8033/cmii/cmii-uav-grid-engine:5.1.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-grid-engine + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: 
/cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-grid-engine + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-grid-engine + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-grid-engine + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-depotautoreturn + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: 
cmii-uav-depotautoreturn + image: 192.168.10.3:8033/cmii/cmii-uav-depotautoreturn:5.5.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-depotautoreturn + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-depotautoreturn + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-depotautoreturn + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-depotautoreturn + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: 
backend + cmii.app: cmii-uav-depotautoreturn + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-data-post-process + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-data-post-process + image: 192.168.10.3:8033/cmii/cmii-uav-data-post-process:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-data-post-process + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + 
httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-data-post-process + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-data-post-process + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-data-post-process + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-developer + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-developer + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-developer + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-developer + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-developer + image: 192.168.10.3:8033/cmii/cmii-uav-developer:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + 
value: cmii-uav-developer + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-developer + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-developer + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-developer + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-developer + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-iam-gateway + namespace: 
zjyd + labels: + cmii.type: backend + cmii.app: cmii-iam-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-iam-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-iam-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-iam-gateway + image: 192.168.10.3:8033/cmii/cmii-iam-gateway:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-iam-gateway + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + 
timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-iam-gateway + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-iam-gateway + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-iam-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-iam-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-device + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-device + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-uav-device + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-uav-device + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-device + image: 192.168.10.3:8033/cmii/cmii-uav-device:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-device + - name: CUST_JAVA_OPTS + value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.7.0 + - name: 
SYS_CONFIG_GROUP + value: 5.7.0 + - name: IMAGE_VERSION + value: 5.7.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits: + memory: 2Gi + cpu: "2" + requests: + memory: 200Mi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/health + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: nfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjyd/cmii-uav-device + volumes: + - name: nfs-backend-log-volume + persistentVolumeClaim: + claimName: nfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-device + namespace: zjyd + labels: + cmii.type: backend + cmii.app: cmii-uav-device + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-uav-device + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 diff --git a/agent-common/real_project/zjyd/old/k8s-configmap.yaml b/agent-common/real_project/zjyd/old/k8s-configmap.yaml new file mode 100644 index 0000000..b88e1c5 --- /dev/null +++ b/agent-common/real_project/zjyd/old/k8s-configmap.yaml @@ -0,0 +1,420 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-armypeople + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + 
CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "armypeople", + AppClientId: "APP_UIegse6Lfou9pO1U" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-cmsportal + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "cmsportal", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-visualization + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "visualization", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-logistics + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "logistics", + AppClientId: "APP_PvdfRRRBPL8xbIwl" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-share + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "share", + AppClientId: "APP_4lVSVI0ZGxTssir8" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-supervisionh5 + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "supervisionh5", + AppClientId: "APP_qqSu82THfexI8PLM" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-multiterminal + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "multiterminal", + AppClientId: "APP_PvdfRRRBPL8xbIwl" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-open + namespace: zjyd +data: 
+ ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "open", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-seniclive + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "seniclive", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-splice + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "splice", + AppClientId: "APP_zE0M3sTRXrCIJS8Y" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-jiangsuwenlv + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "jiangsuwenlv", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-qingdao + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "qingdao", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-traffic + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "traffic", + AppClientId: "APP_Jc8i2wOQ1t73QEJS" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-hljtt + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "hljtt", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-pangu + namespace: zjyd +data: 
+ ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-base + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "base", + AppClientId: "APP_9LY41OaKSqk2btY0" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-detection + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "detection", + AppClientId: "APP_FDHW2VLVDWPnnOCy" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-security + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "security", + AppClientId: "APP_JUSEMc7afyWXxvE7" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-threedsimulation + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "threedsimulation", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-uasms + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "uasms", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-uas + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "uas", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-dispatchh5 + 
namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "dispatchh5", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-supervision + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "supervision", + AppClientId: "APP_qqSu82THfexI8PLM" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-ai-brain + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "ai-brain", + AppClientId: "APP_rafnuCAmBESIVYMH" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-mws + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "mws", + AppClientId: "APP_uKniXPELlRERBBwK" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-emergency + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "emergency", + AppClientId: "APP_aGsTAY1uMZrpKdfk" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-media + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "media", + AppClientId: "APP_4AU8lbifESQO4FD6" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-oms + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "oms", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 
+metadata: + name: tenant-prefix-securityh5 + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "securityh5", + AppClientId: "APP_N3ImO0Ubfu9peRHD" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tenant-prefix-qinghaitourism + namespace: zjyd +data: + ingress-config.js: |- + var __GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "111.2.224.59:8088", + ApplicationShortName: "qinghaitourism", + AppClientId: "empty" + } diff --git a/agent-common/real_project/zjyd/old/k8s-dashboard.yaml b/agent-common/real_project/zjyd/old/k8s-dashboard.yaml new file mode 100644 index 0000000..be1e398 --- /dev/null +++ b/agent-common/real_project/zjyd/old/k8s-dashboard.yaml @@ -0,0 +1,309 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + kubernetes.io/cluster-service: "true" + name: kubernetes-dashboard + namespace: kube-system +spec: + ports: + - port: 443 + targetPort: 8443 + nodePort: 30554 + selector: + k8s-app: kubernetes-dashboard + type: NodePort + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: kube-system +type: Opaque + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-csrf + namespace: kube-system +type: Opaque +data: + csrf: "" + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-key-holder + namespace: kube-system +type: Opaque + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: 
kubernetes-dashboard-settings + namespace: kube-system + +--- + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +rules: + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. + - apiGroups: [ "" ] + resources: [ "secrets" ] + resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf" ] + verbs: [ "get", "update", "delete" ] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. + - apiGroups: [ "" ] + resources: [ "configmaps" ] + resourceNames: [ "kubernetes-dashboard-settings" ] + verbs: [ "get", "update" ] + # Allow Dashboard to get metrics. + - apiGroups: [ "" ] + resources: [ "services" ] + resourceNames: [ "heapster", "dashboard-metrics-scraper" ] + verbs: [ "proxy" ] + - apiGroups: [ "" ] + resources: [ "services/proxy" ] + resourceNames: [ "heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper" ] + verbs: [ "get" ] + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: [ "metrics.k8s.io" ] + resources: [ "pods", "nodes" ] + verbs: [ "get", "list", "watch" ] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: kubernetes-dashboard + image: 192.168.10.3:8033/cmii/dashboard:v2.0.1 + ports: + - containerPort: 8443 + protocol: TCP + args: + - --auto-generate-certificates + - --namespace=kube-system + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. + # - --apiserver-host=http://my-address:port + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: kubernetes-dashboard-certs + - name: tmp-volume + emptyDir: { } + serviceAccountName: kubernetes-dashboard + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kube-system +spec: + ports: + - port: 8000 + targetPort: 8000 + selector: + k8s-app: dashboard-metrics-scraper + +--- + +kind: Deployment +apiVersion: 
apps/v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: dashboard-metrics-scraper + template: + metadata: + labels: + k8s-app: dashboard-metrics-scraper + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + spec: + containers: + - name: dashboard-metrics-scraper + image: 192.168.10.3:8033/cmii/metrics-scraper:v1.0.4 + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTP + path: / + port: 8000 + initialDelaySeconds: 30 + timeoutSeconds: 30 + volumeMounts: + - mountPath: /tmp + name: tmp-volume + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + serviceAccountName: kubernetes-dashboard + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + volumes: + - name: tmp-volume + emptyDir: { } +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: admin-user + namespace: kube-system + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: admin-user +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: admin-user + namespace: kube-system diff --git a/agent-common/real_project/zjyd/old/k8s-emqx.yaml b/agent-common/real_project/zjyd/old/k8s-emqx.yaml new file mode 100644 index 0000000..8357512 --- /dev/null +++ b/agent-common/real_project/zjyd/old/k8s-emqx.yaml @@ -0,0 +1,274 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-emqxs + namespace: zjyd +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-emqxs-env + namespace: zjyd + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: 
emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 +data: + EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443" + EMQX_NAME: "helm-emqxs" + EMQX_CLUSTER__DISCOVERY: "k8s" + EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs" + EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless" + EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns" + EMQX_CLUSTER__K8S__namespace: "zjyd" + EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local" + EMQX_ALLOW_ANONYMOUS: "false" + EMQX_ACL_NOMATCH: "deny" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-emqxs-cm + namespace: zjyd + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 +data: + emqx_auth_mnesia.conf: |- + auth.mnesia.password_hash = sha256 + + # clientid 认证数据 + # auth.client.1.clientid = admin + # auth.client.1.password = 4YPk*DS%+5 + + ## username 认证数据 + auth.user.1.username = admin + auth.user.1.password = odD8#Ve7.B + auth.user.2.username = cmlc + auth.user.2.password = odD8#Ve7.B + + acl.conf: |- + {allow, {user, "admin"}, pubsub, ["admin/#"]}. + {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}. + {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}. + {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}. + {allow, all}. + + loaded_plugins: |- + {emqx_auth_mnesia,true}. + {emqx_auth_mnesia,true}. + {emqx_management, true}. + {emqx_recon, true}. + {emqx_retainer, false}. + {emqx_dashboard, true}. + {emqx_telemetry, true}. + {emqx_rule_engine, true}. + {emqx_bridge_mqtt, false}. 
+--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-emqxs + namespace: zjyd + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 +spec: + replicas: 1 + serviceName: helm-emqxs-headless + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + template: + metadata: + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 + spec: + affinity: { } + imagePullSecrets: + - name: harborsecret + serviceAccountName: helm-emqxs + containers: + - name: helm-emqxs + image: 192.168.10.3:8033/cmii/emqx:4.4.9 + imagePullPolicy: Always + ports: + - name: mqtt + containerPort: 1883 + - name: mqttssl + containerPort: 8883 + - name: mgmt + containerPort: 8081 + - name: ws + containerPort: 8083 + - name: wss + containerPort: 8084 + - name: dashboard + containerPort: 18083 + - name: ekka + containerPort: 4370 + envFrom: + - configMapRef: + name: helm-emqxs-env + resources: { } + volumeMounts: + - name: emqx-data + mountPath: "/opt/emqx/data/mnesia" + readOnly: false + - name: helm-emqxs-cm + mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf" + subPath: emqx_auth_mnesia.conf + readOnly: false + # - name: helm-emqxs-cm + # mountPath: "/opt/emqx/etc/acl.conf" + # subPath: "acl.conf" + # readOnly: false + - name: helm-emqxs-cm + mountPath: "/opt/emqx/data/loaded_plugins" + subPath: loaded_plugins + readOnly: false + volumes: + - name: emqx-data + persistentVolumeClaim: + claimName: helm-emqxs + - name: helm-emqxs-cm + configMap: + name: helm-emqxs-cm + items: + - key: emqx_auth_mnesia.conf + path: emqx_auth_mnesia.conf + - key: acl.conf + path: acl.conf + - key: loaded_plugins + path: 
loaded_plugins +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-emqxs + namespace: zjyd +rules: + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - watch + - list +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-emqxs + namespace: zjyd +subjects: + - kind: ServiceAccount + name: helm-emqxs + namespace: zjyd +roleRef: + kind: Role + name: helm-emqxs + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-emqxs + namespace: zjyd + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 +spec: + type: NodePort + selector: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + ports: + - port: 1883 + name: mqtt + targetPort: 1883 + nodePort: 31883 + - port: 18083 + name: dashboard + targetPort: 18083 + nodePort: 38085 + - port: 8083 + name: mqtt-websocket + targetPort: 8083 + nodePort: 38083 +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-emqxs-headless + namespace: zjyd + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + clusterIP: None + selector: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + ports: + - name: mqtt + port: 1883 + protocol: TCP + targetPort: 1883 + - name: mqttssl + port: 8883 + protocol: TCP + targetPort: 8883 + - name: mgmt + port: 8081 + protocol: TCP + targetPort: 8081 + - name: websocket + port: 8083 + protocol: TCP + targetPort: 8083 + - name: wss + port: 8084 + protocol: TCP + targetPort: 8084 + - name: dashboard + port: 18083 + protocol: TCP + targetPort: 18083 + - name: ekka + port: 4370 + protocol: TCP + targetPort: 
4370 diff --git a/agent-common/real_project/zjyd/old/k8s-frontend.yaml b/agent-common/real_project/zjyd/old/k8s-frontend.yaml new file mode 100644 index 0000000..c86d38b --- /dev/null +++ b/agent-common/real_project/zjyd/old/k8s-frontend.yaml @@ -0,0 +1,1983 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-cm + namespace: zjyd + labels: + cmii.type: frontend +data: + nginx.conf: | + server { + listen 9528; + server_name localhost; + gzip on; + + location / { + root /home/cmii-platform/dist; + index index.html index.htm; + } + + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root html; + } + } +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-cms-portal + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-cms-portal + image: 192.168.10.3:8033/cmii/cmii-uav-platform-cms-portal:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-cms-portal + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: 
tenant-prefix-cmsportal + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-cms-portal + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-cms-portal + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-securityh5 + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-securityh5 + image: 192.168.10.3:8033/cmii/cmii-uav-platform-securityh5:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-securityh5 + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-securityh5 + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: 
cmii-uav-platform-securityh5 + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-securityh5 + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-armypeople + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-armypeople + image: 192.168.10.3:8033/cmii/cmii-uav-platform-armypeople:5.7.0-29668-071901 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-armypeople + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-armypeople + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-armypeople + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + 
octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-armypeople + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-splice + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-splice + image: 192.168.10.3:8033/cmii/cmii-uav-platform-splice:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-splice + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-splice + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-splice + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-splice + ports: + - 
name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-detection + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-detection + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-detection + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-detection + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-detection + image: 192.168.10.3:8033/cmii/cmii-uav-platform-detection:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-detection + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-detection + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-detection + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-detection + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-detection + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: 
cmii-uav-platform-logistics + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-logistics + image: 192.168.10.3:8033/cmii/cmii-uav-platform-logistics:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-logistics + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-logistics + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-logistics + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-logistics + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-security + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + octopus.control: frontend-app-wdd + 
app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-security + image: 192.168.10.3:8033/cmii/cmii-uav-platform-security:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-security + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-security + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-security + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-security + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-uasms + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: 
cmii-uav-platform-uasms + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-uasms + image: 192.168.10.3:8033/cmii/cmii-uav-platform-uasms:5.7.0-071815 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-uasms + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-uasms + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-uasms + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-uasms + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-threedsimulation + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-threedsimulation + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-threedsimulation + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-threedsimulation + spec: + imagePullSecrets: + 
- name: harborsecret + containers: + - name: cmii-uav-platform-threedsimulation + image: 192.168.10.3:8033/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-threedsimulation + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-threedsimulation + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-threedsimulation + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-threedsimulation + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-threedsimulation + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-open + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-open + image: 
192.168.10.3:8033/cmii/cmii-uav-platform-open:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-open + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-open + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-open + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-open + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-oms + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-oms + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-oms + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-oms + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-oms + image: 192.168.10.3:8033/cmii/cmii-uav-platform-oms:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-oms + ports: + - name: platform-9528 
+ containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-oms + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-oms + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-oms + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-oms + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-share + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-share + image: 192.168.10.3:8033/cmii/cmii-uav-platform-share:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-share + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + 
subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-share + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-share + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-share + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform + image: 192.168.10.3:8033/cmii/cmii-uav-platform:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf 
+ - name: tenant-prefix + configMap: + name: tenant-prefix-pangu + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-suav-platform-supervisionh5 + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervisionh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervisionh5 + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervisionh5 + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-suav-platform-supervisionh5 + image: 192.168.10.3:8033/cmii/cmii-suav-platform-supervisionh5:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-suav-platform-supervisionh5 + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-supervisionh5 + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: 
v1 +kind: Service +metadata: + name: cmii-suav-platform-supervisionh5 + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervisionh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervisionh5 + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-uas + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 0 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-uas + image: 192.168.10.3:8033/cmii/cmii-uav-platform-uas:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-uas + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-uas + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-uas + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + octopus.control: frontend-app-wdd + 
app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-uas + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-media + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-media + image: 192.168.10.3:8033/cmii/cmii-uav-platform-media:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-media + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-media + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-media + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-media + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + 
targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-suav-platform-supervision + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-suav-platform-supervision + image: 192.168.10.3:8033/cmii/cmii-suav-platform-supervision:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-suav-platform-supervision + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-supervision + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-suav-platform-supervision + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-suav-platform-supervision + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-multiterminal + 
namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-multiterminal + image: 192.168.10.3:8033/cmii/cmii-uav-platform-multiterminal:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-multiterminal + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-multiterminal + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-multiterminal + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-multiterminal + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-dispatchh5 + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + octopus.control: 
frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-dispatchh5 + image: 192.168.10.3:8033/cmii/cmii-uav-platform-dispatchh5:5.6.0-0708 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-dispatchh5 + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-dispatchh5 + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-dispatchh5 + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-dispatchh5 + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-ai-brain + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + 
matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-ai-brain + image: 192.168.10.3:8033/cmii/cmii-uav-platform-ai-brain:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-ai-brain + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-ai-brain + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-ai-brain + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-ai-brain + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-mws + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + spec: + 
imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-mws + image: 192.168.10.3:8033/cmii/cmii-uav-platform-mws:5.7.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-mws + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-mws + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-mws + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-mws + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-uav-platform-emergency-rescue + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.7.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-uav-platform-emergency-rescue + image: 
192.168.10.3:8033/cmii/cmii-uav-platform-emergency-rescue:5.6.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjyd + - name: APPLICATION_NAME + value: cmii-uav-platform-emergency-rescue + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/nginx.conf + subPath: nginx.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-emergency + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-uav-platform-emergency-rescue + namespace: zjyd + labels: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.7.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-uav-platform-emergency-rescue + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 diff --git a/agent-common/real_project/zjyd/old/k8s-ingress.yaml b/agent-common/real_project/zjyd/old/k8s-ingress.yaml new file mode 100644 index 0000000..cc9145d --- /dev/null +++ b/agent-common/real_project/zjyd/old/k8s-ingress.yaml @@ -0,0 +1,604 @@ +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: frontend-applications-ingress + namespace: zjyd + labels: + type: frontend + octopus.control: all-ingress-config-wdd + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/configuration-snippet: | + 
rewrite ^(/supervision)$ $1/ redirect; + rewrite ^(/supervisionh5)$ $1/ redirect; + rewrite ^(/pangu)$ $1/ redirect; + rewrite ^(/ai-brain)$ $1/ redirect; + rewrite ^(/armypeople)$ $1/ redirect; + rewrite ^(/base)$ $1/ redirect; + rewrite ^(/cmsportal)$ $1/ redirect; + rewrite ^(/detection)$ $1/ redirect; + rewrite ^(/dispatchh5)$ $1/ redirect; + rewrite ^(/emergency)$ $1/ redirect; + rewrite ^(/hljtt)$ $1/ redirect; + rewrite ^(/jiangsuwenlv)$ $1/ redirect; + rewrite ^(/logistics)$ $1/ redirect; + rewrite ^(/media)$ $1/ redirect; + rewrite ^(/multiterminal)$ $1/ redirect; + rewrite ^(/mws)$ $1/ redirect; + rewrite ^(/oms)$ $1/ redirect; + rewrite ^(/open)$ $1/ redirect; + rewrite ^(/qingdao)$ $1/ redirect; + rewrite ^(/qinghaitourism)$ $1/ redirect; + rewrite ^(/security)$ $1/ redirect; + rewrite ^(/securityh5)$ $1/ redirect; + rewrite ^(/seniclive)$ $1/ redirect; + rewrite ^(/share)$ $1/ redirect; + rewrite ^(/splice)$ $1/ redirect; + rewrite ^(/threedsimulation)$ $1/ redirect; + rewrite ^(/traffic)$ $1/ redirect; + rewrite ^(/uas)$ $1/ redirect; + rewrite ^(/uasms)$ $1/ redirect; + rewrite ^(/visualization)$ $1/ redirect; +spec: + rules: + - host: fake-domain.zjyd.io + http: + paths: + - path: /?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /supervision/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-platform-supervision + servicePort: 9528 + - path: /supervisionh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-platform-supervisionh5 + servicePort: 9528 + - path: /pangu/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /ai-brain/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-ai-brain + servicePort: 9528 + - path: /armypeople/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-armypeople + servicePort: 
9528 + - path: /base/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-base + servicePort: 9528 + - path: /cmsportal/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-cms-portal + servicePort: 9528 + - path: /detection/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-detection + servicePort: 9528 + - path: /dispatchh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-dispatchh5 + servicePort: 9528 + - path: /emergency/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-emergency-rescue + servicePort: 9528 + - path: /hljtt/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-hljtt + servicePort: 9528 + - path: /jiangsuwenlv/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-jiangsuwenlv + servicePort: 9528 + - path: /logistics/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-logistics + servicePort: 9528 + - path: /media/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-media + servicePort: 9528 + - path: /multiterminal/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-multiterminal + servicePort: 9528 + - path: /mws/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-mws + servicePort: 9528 + - path: /oms/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-oms + servicePort: 9528 + - path: /open/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-open + servicePort: 9528 + - path: /qingdao/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-qingdao + servicePort: 9528 + - path: /qinghaitourism/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-qinghaitourism + 
servicePort: 9528 + - path: /security/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-security + servicePort: 9528 + - path: /securityh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-securityh5 + servicePort: 9528 + - path: /seniclive/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-seniclive + servicePort: 9528 + - path: /share/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-share + servicePort: 9528 + - path: /splice/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-splice + servicePort: 9528 + - path: /threedsimulation/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-threedsimulation + servicePort: 9528 + - path: /traffic/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-traffic + servicePort: 9528 + - path: /uas/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-uas + servicePort: 9528 + - path: /uasms/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-uasms + servicePort: 9528 + - path: /visualization/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-visualization + servicePort: 9528 +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: backend-applications-ingress + namespace: zjyd + labels: + type: backend + octopus.control: all-ingress-config-wdd + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" +spec: + rules: + - host: cmii-admin-data.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-data + servicePort: 8080 + - host: cmii-admin-gateway.uavcloud-zjyd.io + http: + paths: + - path: 
/ + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-gateway + servicePort: 8080 + - host: cmii-admin-user.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-user + servicePort: 8080 + - host: cmii-app-release.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-app-release + servicePort: 8080 + - host: cmii-open-gateway.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-open-gateway + servicePort: 8080 + - host: cmii-suav-supervision.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-supervision + servicePort: 8080 + - host: cmii-uas-gateway.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uas-gateway + servicePort: 8080 + - host: cmii-uas-lifecycle.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uas-lifecycle + servicePort: 8080 + - host: cmii-uav-airspace.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-airspace + servicePort: 8080 + - host: cmii-uav-alarm.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-alarm + servicePort: 8080 + - host: cmii-uav-autowaypoint.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-autowaypoint + servicePort: 8080 + - host: cmii-uav-brain.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-brain + servicePort: 8080 + - host: cmii-uav-bridge.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-bridge + servicePort: 8080 
+ - host: cmii-uav-cloud-live.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-cloud-live + servicePort: 8080 + - host: cmii-uav-clusters.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-clusters + servicePort: 8080 + - host: cmii-uav-cms.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-cms + servicePort: 8080 + - host: cmii-uav-data-post-process.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-data-post-process + servicePort: 8080 + - host: cmii-uav-depotautoreturn.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-depotautoreturn + servicePort: 8080 + - host: cmii-uav-developer.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-developer + servicePort: 8080 + - host: cmii-uav-device.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-device + servicePort: 8080 + - host: cmii-uav-emergency.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-emergency + servicePort: 8080 + - host: cmii-uav-gateway.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gateway + servicePort: 8080 + - host: cmii-uav-gis-server.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gis-server + servicePort: 8080 + - host: cmii-uav-grid-datasource.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-grid-datasource + servicePort: 8080 + - host: cmii-uav-grid-engine.uavcloud-zjyd.io + 
http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-grid-engine + servicePort: 8080 + - host: cmii-uav-grid-manage.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-grid-manage + servicePort: 8080 + - host: cmii-uav-industrial-portfolio.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-industrial-portfolio + servicePort: 8080 + - host: cmii-uav-integration.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-integration + servicePort: 8080 + - host: cmii-uav-kpi-monitor.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-kpi-monitor + servicePort: 8080 + - host: cmii-uav-logger.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-logger + servicePort: 8080 + - host: cmii-uav-material-warehouse.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-material-warehouse + servicePort: 8080 + - host: cmii-uav-mission.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-mission + servicePort: 8080 + - host: cmii-uav-mqtthandler.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-mqtthandler + servicePort: 8080 + - host: cmii-uav-multilink.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-multilink + servicePort: 8080 + - host: cmii-uav-notice.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-notice + servicePort: 8080 + - host: cmii-uav-oauth.uavcloud-zjyd.io + http: + paths: + - path: / + 
pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-oauth + servicePort: 8080 + - host: cmii-uav-process.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-process + servicePort: 8080 + - host: cmii-uav-sense-adapter.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-sense-adapter + servicePort: 8080 + - host: cmii-uav-surveillance.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-surveillance + servicePort: 8080 + - host: cmii-uav-threedsimulation.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-threedsimulation + servicePort: 8080 + - host: cmii-uav-tower.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-tower + servicePort: 8080 + - host: cmii-uav-user.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-user + servicePort: 8080 + - host: cmii-uav-waypoint.uavcloud-zjyd.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-waypoint + servicePort: 8080 +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: all-gateways-ingress + namespace: zjyd + labels: + type: api-gateway + octopus.control: all-ingress-config-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; +spec: + rules: + - host: fake-domain.zjyd.io + http: + paths: + - path: /oms/api/?(.*) + pathType: 
ImplementationSpecific + backend: + serviceName: cmii-admin-gateway + servicePort: 8080 + - path: /open/api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-open-gateway + servicePort: 8080 + - path: /api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gateway + servicePort: 8080 diff --git a/agent-common/real_project/zjyd/old/k8s-mongo.yaml b/agent-common/real_project/zjyd/old/k8s-mongo.yaml new file mode 100644 index 0000000..95e234b --- /dev/null +++ b/agent-common/real_project/zjyd/old/k8s-mongo.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mongo + namespace: zjyd + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 +spec: + type: NodePort + selector: + cmii.app: helm-mongo + cmii.type: middleware + ports: + - port: 27017 + name: server-27017 + targetPort: 27017 + nodePort: 37017 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-mongo + namespace: zjyd + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 +spec: + serviceName: helm-mongo + replicas: 1 + selector: + matchLabels: + cmii.app: helm-mongo + cmii.type: middleware + template: + metadata: + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.7.0 + annotations: + pod.alpha.kubernetes.io/initialized: "true" + spec: + imagePullSecrets: + - name: harborsecret + affinity: { } + containers: + - name: helm-mongo + image: 192.168.10.3:8033/cmii/mongo:5.0 + resources: { } + ports: + - containerPort: 27017 + name: mongo27017 + protocol: TCP + env: + - name: MONGO_INITDB_ROOT_USERNAME + value: cmlc + - name: MONGO_INITDB_ROOT_PASSWORD + value: REdPza8#oVlt + volumeMounts: + 
- name: mongo-data + mountPath: /data/db + readOnly: false + subPath: default/helm-mongo/data/db + volumes: + - name: mongo-data + persistentVolumeClaim: + claimName: helm-mongo +--- diff --git a/agent-common/real_project/zjyd/old/k8s-mysql.yaml b/agent-common/real_project/zjyd/old/k8s-mysql.yaml new file mode 100644 index 0000000..b9f9310 --- /dev/null +++ b/agent-common/real_project/zjyd/old/k8s-mysql.yaml @@ -0,0 +1,423 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-mysql + namespace: zjyd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + annotations: { } +secrets: + - name: helm-mysql +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-mysql + namespace: zjyd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus +type: Opaque +data: + mysql-root-password: "UXpmWFFoZDNiUQ==" + mysql-password: "S0F0cm5PckFKNw==" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-mysql + namespace: zjyd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: primary +data: + my.cnf: |- + + [mysqld] + port=3306 + basedir=/opt/bitnami/mysql + datadir=/bitnami/mysql/data + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + socket=/opt/bitnami/mysql/tmp/mysql.sock + log-error=/bitnami/mysql/data/error.log + general_log_file = /bitnami/mysql/data/general.log + slow_query_log_file = /bitnami/mysql/data/slow.log + innodb_data_file_path = ibdata1:512M:autoextend + innodb_buffer_pool_size = 512M + innodb_buffer_pool_instances = 2 + innodb_log_file_size = 512M + innodb_log_files_in_group = 4 + innodb_log_files_in_group = 4 + log-bin = /bitnami/mysql/data/mysql-bin + max_binlog_size=1G + transaction_isolation = REPEATABLE-READ + 
default_storage_engine = innodb + character-set-server = utf8mb4 + collation-server=utf8mb4_bin + binlog_format = ROW + binlog_rows_query_log_events=on + binlog_cache_size=4M + binlog_expire_logs_seconds = 1296000 + max_binlog_cache_size=2G + gtid_mode = on + enforce_gtid_consistency = 1 + sync_binlog = 1 + innodb_flush_log_at_trx_commit = 1 + innodb_flush_method = O_DIRECT + log_slave_updates=1 + relay_log_recovery = 1 + relay-log-purge = 1 + default_time_zone = '+08:00' + lower_case_table_names=1 + log_bin_trust_function_creators=1 + group_concat_max_len=67108864 + innodb_io_capacity = 4000 + innodb_io_capacity_max = 8000 + innodb_flush_sync = 0 + innodb_flush_neighbors = 0 + innodb_write_io_threads = 8 + innodb_read_io_threads = 8 + innodb_purge_threads = 4 + innodb_page_cleaners = 4 + innodb_open_files = 65535 + innodb_max_dirty_pages_pct = 50 + innodb_lru_scan_depth = 4000 + innodb_checksum_algorithm = crc32 + innodb_lock_wait_timeout = 10 + innodb_rollback_on_timeout = 1 + innodb_print_all_deadlocks = 1 + innodb_file_per_table = 1 + innodb_online_alter_log_max_size = 4G + innodb_stats_on_metadata = 0 + innodb_thread_concurrency = 0 + innodb_sync_spin_loops = 100 + innodb_spin_wait_delay = 30 + lock_wait_timeout = 3600 + slow_query_log = 1 + long_query_time = 10 + log_queries_not_using_indexes =1 + log_throttle_queries_not_using_indexes = 60 + min_examined_row_limit = 100 + log_slow_admin_statements = 1 + log_slow_slave_statements = 1 + default_authentication_plugin=mysql_native_password + skip-name-resolve=1 + explicit_defaults_for_timestamp=1 + plugin_dir=/opt/bitnami/mysql/plugin + max_allowed_packet=128M + max_connections = 2000 + max_connect_errors = 1000000 + table_definition_cache=2000 + table_open_cache_instances=64 + tablespace_definition_cache=1024 + thread_cache_size=256 + interactive_timeout = 600 + wait_timeout = 600 + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=32M + bind-address=0.0.0.0 + performance_schema = 1 + 
performance_schema_instrument = '%memory%=on' + performance_schema_instrument = '%lock%=on' + innodb_monitor_enable=ALL + + [mysql] + no-auto-rehash + + [mysqldump] + quick + max_allowed_packet = 32M + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mysql/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-mysql-init-scripts + namespace: zjyd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: primary +data: + create_users_grants_core.sql: |- + create + user zyly@'%' identified by 'Cmii@451315'; + grant select on *.* to zyly@'%'; + create + user zyly_qc@'%' identified by 'Uh)E_owCyb16'; + grant all + on *.* to zyly_qc@'%'; + create + user k8s_admin@'%' identified by 'fP#UaH6qQ3)8'; + grant all + on *.* to k8s_admin@'%'; + create + user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH'; + grant all + on *.* to audit_dba@'%'; + create + user db_backup@'%' identified by 'RU5Pu(4FGdT9'; + GRANT + SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT + on *.* to db_backup@'%'; + create + user monitor@'%' identified by 'PL3#nGtrWbf-'; + grant REPLICATION + CLIENT on *.* to monitor@'%'; + flush + privileges; +--- +kind: Service +apiVersion: v1 +metadata: + name: cmii-mysql + namespace: zjyd + labels: + app.kubernetes.io/component: primary + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: zjyd + cmii.app: mysql + cmii.type: middleware + octopus.control: mysql-db-wdd +spec: + ports: + - name: mysql + protocol: TCP + port: 13306 + targetPort: mysql + selector: + app.kubernetes.io/component: primary + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: zjyd + cmii.app: mysql 
+ cmii.type: middleware + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mysql-headless + namespace: zjyd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + annotations: { } +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mysql + port: 3306 + targetPort: mysql + selector: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: zjyd + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mysql + namespace: zjyd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + annotations: { } +spec: + type: NodePort + ports: + - name: mysql + port: 3306 + protocol: TCP + targetPort: mysql + nodePort: 33306 + selector: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: zjyd + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-mysql + namespace: zjyd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: zjyd + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + serviceName: helm-mysql + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/configuration: 
6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + spec: + imagePullSecrets: + - name: harborsecret + serviceAccountName: helm-mysql + affinity: { } + nodeSelector: + mysql-deploy: "true" + securityContext: + fsGroup: 1001 + initContainers: + - name: change-volume-permissions + image: 192.168.10.3:8033/cmii/bitnami-shell:11-debian-11-r136 + imagePullPolicy: "Always" + command: + - /bin/bash + - -ec + - | + chown -R 1001:1001 /bitnami/mysql + securityContext: + runAsUser: 0 + volumeMounts: + - name: mysql-data + mountPath: /bitnami/mysql + containers: + - name: mysql + image: 192.168.10.3:8033/cmii/mysql:8.1.0-debian-11-r42 + imagePullPolicy: "IfNotPresent" + securityContext: + runAsUser: 1001 + env: + - name: BITNAMI_DEBUG + value: "true" + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: helm-mysql + key: mysql-root-password + - name: MYSQL_DATABASE + value: "cmii" + ports: + - name: mysql + containerPort: 3306 + livenessProbe: + failureThreshold: 5 + initialDelaySeconds: 120 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + readinessProbe: + failureThreshold: 5 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + 
periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + resources: + limits: { } + requests: { } + volumeMounts: + - name: mysql-data + mountPath: /bitnami/mysql + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + - name: config + mountPath: /opt/bitnami/mysql/conf/my.cnf + subPath: my.cnf + volumes: + - name: config + configMap: + name: helm-mysql + - name: custom-init-scripts + configMap: + name: helm-mysql-init-scripts + - name: mysql-data + hostPath: + path: /var/lib/docker/mysql-pv/zjyd/ diff --git a/agent-common/real_project/zjyd/old/k8s-nacos.yaml b/agent-common/real_project/zjyd/old/k8s-nacos.yaml new file mode 100644 index 0000000..1f70d80 --- /dev/null +++ b/agent-common/real_project/zjyd/old/k8s-nacos.yaml @@ -0,0 +1,130 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-nacos-cm + namespace: zjyd + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 5.7.0 +data: + mysql.db.name: "cmii_nacos_config" + mysql.db.host: "helm-mysql" + mysql.port: "3306" + mysql.user: "k8s_admin" + mysql.password: "fP#UaH6qQ3)8" +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-nacos + namespace: zjyd + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 5.7.0 +spec: + type: NodePort + selector: + cmii.app: helm-nacos + cmii.type: middleware + ports: + - port: 8848 + name: server + targetPort: 8848 + nodePort: 38848 + - port: 9848 + name: server12 + targetPort: 9848 + - port: 9849 + name: server23 + targetPort: 9849 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-nacos + 
namespace: zjyd + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 5.7.0 +spec: + serviceName: helm-nacos + replicas: 1 + selector: + matchLabels: + cmii.app: helm-nacos + cmii.type: middleware + template: + metadata: + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/version: 5.7.0 + annotations: + pod.alpha.kubernetes.io/initialized: "true" + spec: + imagePullSecrets: + - name: harborsecret + affinity: { } + containers: + - name: nacos-server + image: 192.168.10.3:8033/cmii/nacos-server:v2.1.2 + ports: + - containerPort: 8848 + name: dashboard + - containerPort: 9848 + name: tcp-9848 + - containerPort: 9849 + name: tcp-9849 + env: + - name: NACOS_AUTH_ENABLE + value: "false" + - name: NACOS_REPLICAS + value: "1" + - name: MYSQL_SERVICE_DB_NAME + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.db.name + - name: MYSQL_SERVICE_PORT + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.port + - name: MYSQL_SERVICE_USER + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.user + - name: MYSQL_SERVICE_PASSWORD + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.password + - name: MYSQL_SERVICE_HOST + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.db.host + - name: NACOS_SERVER_PORT + value: "8848" + - name: NACOS_APPLICATION_PORT + value: "8848" + - name: PREFER_HOST_MODE + value: "hostname" + - name: MODE + value: standalone + - name: SPRING_DATASOURCE_PLATFORM + value: mysql +--- diff --git a/agent-common/real_project/zjyd/old/k8s-nfs-test.yaml b/agent-common/real_project/zjyd/old/k8s-nfs-test.yaml new file mode 100644 index 0000000..f49291a --- /dev/null +++ b/agent-common/real_project/zjyd/old/k8s-nfs-test.yaml @@ -0,0 +1,38 @@ +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: 
test-claim + annotations: + volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" #与nfs-StorageClass.yaml metadata.name保持一致 +spec: + accessModes: + - ReadWriteOnce + storageClassName: nfs-prod-distribute + resources: + requests: + storage: 1Mi +--- +kind: Pod +apiVersion: v1 +metadata: + name: test-pod +spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: test-pod + image: 192.168.10.3:8033/cmii/busybox:latest + command: + - "/bin/sh" + args: + - "-c" + - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出 + volumeMounts: + - name: nfs-pvc + mountPath: "/mnt" + restartPolicy: "Never" + volumes: + - name: nfs-pvc + persistentVolumeClaim: + claimName: test-claim #与PVC名称保持一致 diff --git a/agent-common/real_project/zjyd/old/k8s-nfs.yaml b/agent-common/real_project/zjyd/old/k8s-nfs.yaml new file mode 100644 index 0000000..726fb42 --- /dev/null +++ b/agent-common/real_project/zjyd/old/k8s-nfs.yaml @@ -0,0 +1,114 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system #根据实际环境设定namespace,下面类同 +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: nfs-client-provisioner-runner +rules: + - apiGroups: [ "" ] + resources: [ "persistentvolumes" ] + verbs: [ "get", "list", "watch", "create", "delete" ] + - apiGroups: [ "" ] + resources: [ "persistentvolumeclaims" ] + verbs: [ "get", "list", "watch", "update" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "storageclasses" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "create", "update", "patch" ] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: run-nfs-client-provisioner +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system +roleRef: + kind: 
ClusterRole + # name: nfs-client-provisioner-runner + name: cluster-admin + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system +rules: + - apiGroups: [ "" ] + resources: [ "endpoints" ] + verbs: [ "get", "list", "watch", "create", "update", "patch" ] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system +roleRef: + kind: Role + name: leader-locking-nfs-client-provisioner + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: nfs-prod-distribute +provisioner: cmlc-nfs-storage #这里的名称要和provisioner配置文件中的环境变量PROVISIONER_NAME保持一致parameters: archiveOnDelete: "false" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nfs-client-provisioner + labels: + app: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system #与RBAC文件中的namespace保持一致 +spec: + replicas: 1 + selector: + matchLabels: + app: nfs-client-provisioner + strategy: + type: Recreate + template: + metadata: + labels: + app: nfs-client-provisioner + spec: + imagePullSecrets: + - name: harborsecret + serviceAccountName: nfs-client-provisioner + containers: + - name: nfs-client-provisioner + image: 192.168.10.3:8033/cmii/nfs-subdir-external-provisioner:v4.0.2 + volumeMounts: + - name: nfs-client-root + mountPath: /persistentvolumes + env: + - name: PROVISIONER_NAME + value: cmlc-nfs-storage + - name: NFS_SERVER + value: 192.168.10.3 + - name: NFS_PATH + value: /var/lib/docker/nfs_data + volumes: + - name: nfs-client-root + nfs: + server: 192.168.10.3 + path: /var/lib/docker/nfs_data diff --git 
a/agent-common/real_project/zjyd/old/k8s-pvc.yaml b/agent-common/real_project/zjyd/old/k8s-pvc.yaml new file mode 100644 index 0000000..8082c4e --- /dev/null +++ b/agent-common/real_project/zjyd/old/k8s-pvc.yaml @@ -0,0 +1,76 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: nfs-backend-log-pvc + namespace: zjyd + labels: + cmii.type: middleware-base + cmii.app: nfs-backend-log-pvc + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.7.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 100Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-emqxs + namespace: zjyd + labels: + cmii.type: middleware-base + cmii.app: helm-emqxs + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.7.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 20Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-mongo + namespace: zjyd + labels: + cmii.type: middleware-base + cmii.app: helm-mongo + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.7.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 30Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-rabbitmq + namespace: zjyd + labels: + cmii.type: middleware-base + cmii.app: helm-rabbitmq + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.7.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 20Gi diff --git a/agent-common/real_project/zjyd/old/k8s-rabbitmq.yaml b/agent-common/real_project/zjyd/old/k8s-rabbitmq.yaml new file mode 100644 index 0000000..bc68704 --- 
/dev/null +++ b/agent-common/real_project/zjyd/old/k8s-rabbitmq.yaml @@ -0,0 +1,328 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-rabbitmq + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +automountServiceAccountToken: true +secrets: + - name: helm-rabbitmq +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-rabbitmq + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +type: Opaque +data: + rabbitmq-password: "blljUk45MXIuX2hq" + rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-rabbitmq-config + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +data: + rabbitmq.conf: |- + ## Username and password + ## + default_user = admin + default_pass = nYcRN91r._hj + ## Clustering + ## + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.cluster.local + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + # queue master locator + queue_master_locator = min-masters + # enable guest user + loopback_users.guest = false + #default_vhost = default-vhost + #disk_free_limit.absolute = 50MB + #load_definitions = /app/load_definition.json +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-rabbitmq-endpoint-reader + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +rules: + - apiGroups: [ "" 
] + resources: [ "endpoints" ] + verbs: [ "get" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "create" ] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-rabbitmq-endpoint-reader + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +subjects: + - kind: ServiceAccount + name: helm-rabbitmq +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: helm-rabbitmq-endpoint-reader +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-rabbitmq-headless + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +spec: + clusterIP: None + ports: + - name: epmd + port: 4369 + targetPort: epmd + - name: amqp + port: 5672 + targetPort: amqp + - name: dist + port: 25672 + targetPort: dist + - name: dashboard + port: 15672 + targetPort: stats + selector: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: zjyd + publishNotReadyAddresses: true +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-rabbitmq + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +spec: + type: NodePort + ports: + - name: amqp + port: 5672 + targetPort: amqp + nodePort: 35672 + - name: dashboard + port: 15672 + targetPort: dashboard + nodePort: 36675 + selector: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: zjyd +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-rabbitmq + namespace: zjyd + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq +spec: + serviceName: helm-rabbitmq-headless + podManagementPolicy: 
OrderedReady + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: zjyd + template: + metadata: + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: rabbitmq + annotations: + checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 + checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f + spec: + imagePullSecrets: + - name: harborsecret + serviceAccountName: helm-rabbitmq + affinity: { } + securityContext: + fsGroup: 5001 + runAsUser: 5001 + terminationGracePeriodSeconds: 120 + initContainers: + - name: volume-permissions + image: 192.168.10.3:8033/cmii/bitnami-shell:11-debian-11-r136 + imagePullPolicy: "Always" + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "/bitnami/rabbitmq/mnesia" + chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" + securityContext: + runAsUser: 0 + resources: + limits: { } + requests: { } + volumeMounts: + - name: data + mountPath: /bitnami/rabbitmq/mnesia + containers: + - name: rabbitmq + image: 192.168.10.3:8033/cmii/rabbitmq:3.9.12-debian-10-r3 + imagePullPolicy: "Always" + env: + - name: BITNAMI_DEBUG + value: "false" + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: "helm-rabbitmq-headless" + - name: K8S_ADDRESS_TYPE + value: hostname + - name: RABBITMQ_FORCE_BOOT + value: "no" + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" + - name: RABBITMQ_MNESIA_DIR + value: 
"/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: "no" + - name: RABBITMQ_LOGS + value: "-" + - name: RABBITMQ_ULIMIT_NOFILES + value: "65536" + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: helm-rabbitmq + key: rabbitmq-erlang-cookie + - name: RABBITMQ_LOAD_DEFINITIONS + value: "no" + - name: RABBITMQ_SECURE_PASSWORD + value: "yes" + - name: RABBITMQ_USERNAME + value: "admin" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: helm-rabbitmq + key: rabbitmq-password + - name: RABBITMQ_PLUGINS + value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" + ports: + - name: amqp + containerPort: 5672 + - name: dist + containerPort: 25672 + - name: dashboard + containerPort: 15672 + - name: epmd + containerPort: 4369 + livenessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q ping + initialDelaySeconds: 120 + periodSeconds: 30 + timeoutSeconds: 20 + successThreshold: 1 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + initialDelaySeconds: 10 + periodSeconds: 30 + timeoutSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" + else + rabbitmqctl stop_app + fi + resources: + limits: { } + requests: { } + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: data + mountPath: /bitnami/rabbitmq/mnesia + volumes: + - name: configuration + configMap: + name: helm-rabbitmq-config + items: + - key: rabbitmq.conf + path: rabbitmq.conf + - name: data + persistentVolumeClaim: + claimName: helm-rabbitmq diff --git 
a/agent-common/real_project/zjyd/old/k8s-redis.yaml b/agent-common/real_project/zjyd/old/k8s-redis.yaml new file mode 100644 index 0000000..a63b949 --- /dev/null +++ b/agent-common/real_project/zjyd/old/k8s-redis.yaml @@ -0,0 +1,585 @@ +--- +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: helm-redis + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-redis + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus +type: Opaque +data: + redis-password: "TWNhY2hlQDQ1MjI=" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-configuration + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus +data: + redis.conf: |- + # User-supplied common configuration: + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. 
+ save "" + # End of common configuration + master.conf: |- + dir /data + # User-supplied master configuration: + rename-command FLUSHDB "" + rename-command FLUSHALL "" + # End of master configuration + replica.conf: |- + dir /data + slave-read-only yes + # User-supplied replica configuration: + rename-command FLUSHDB "" + rename-command FLUSHALL "" + # End of replica configuration +--- +# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-health + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus +data: + ping_readiness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash + + 
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status +--- +# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-scripts + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus +data: + start-master.sh: | + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + exec redis-server "${ARGS[@]}" + start-replica.sh: | + #!/bin/bash + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo 26379 + ;; + "REDIS") + echo 6379 + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + echo "${hostname}.${HEADLESS_SERVICE}" + } + + REDISPORT=$(get_port "$HOSTNAME" "REDIS") + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + + echo "" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + exec redis-server "${ARGS[@]}" +--- +# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: helm-redis-headless + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: zjyd +--- +# Source: outside-deploy/charts/redis-db/templates/master/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: helm-redis-master + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +spec: + type: ClusterIP + + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + nodePort: null + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: zjyd + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +--- +# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml +apiVersion: v1 +kind: Service 
+metadata: + name: helm-redis-replicas + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica +spec: + type: ClusterIP + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + nodePort: null + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: zjyd + app.kubernetes.io/component: replica +--- +# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-redis-master + namespace: zjyd + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: zjyd + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master + serviceName: helm-redis-headless + updateStrategy: + rollingUpdate: { } + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master + annotations: + checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 + checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 + checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 + checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d + spec: + affinity: { } + securityContext: + fsGroup: 1001 + serviceAccountName: helm-redis + imagePullSecrets: + - name: harborsecret + terminationGracePeriodSeconds: 30 + containers: + - name: redis + image: 
192.168.10.3:8033/cmii/redis:6.2.6-debian-10-r0 + imagePullPolicy: "Always" + securityContext: + runAsUser: 1001 + command: + - /bin/bash + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: REDIS_REPLICATION_MODE + value: master + - name: ALLOW_EMPTY_PASSWORD + value: "no" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_TLS_ENABLED + value: "no" + - name: REDIS_PORT + value: "6379" + ports: + - name: redis + containerPort: 6379 + livenessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + # One second longer than command timeout should prevent generation of zombie processes. + timeoutSeconds: 6 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh 5 + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 2 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh 1 + resources: + limits: + cpu: "2" + memory: 8Gi + requests: + cpu: "100m" + memory: 1Gi + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + - name: redis-data + mountPath: /data + subPath: + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + - name: tmp + mountPath: /tmp + volumes: + - name: start-scripts + configMap: + name: helm-redis-scripts + defaultMode: 0755 + - name: health + configMap: + name: helm-redis-health + defaultMode: 0755 + - name: config + configMap: + name: helm-redis-configuration + - name: redis-tmp-conf + emptyDir: { } + - name: tmp + emptyDir: { } + - name: redis-data + emptyDir: { } +--- +# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-redis-replicas + namespace: zjyd + 
labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: zjyd + app.kubernetes.io/component: replica + serviceName: helm-redis-headless + updateStrategy: + rollingUpdate: { } + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: zjyd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica + annotations: + checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 + checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 + checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 + checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d + spec: + imagePullSecrets: + - name: harborsecret + securityContext: + fsGroup: 1001 + serviceAccountName: helm-redis + terminationGracePeriodSeconds: 30 + containers: + - name: redis + image: 192.168.10.3:8033/cmii/redis:6.2.6-debian-10-r0 + imagePullPolicy: "Always" + securityContext: + runAsUser: 1001 + command: + - /bin/bash + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-replica.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: helm-redis-master-0.helm-redis-headless.zjyd.svc.cluster.local + - name: REDIS_MASTER_PORT_NUMBER + value: "6379" + - name: ALLOW_EMPTY_PASSWORD + value: "no" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_TLS_ENABLED + value: "no" + - name: REDIS_PORT + value: "6379" + ports: + - name: 
redis + containerPort: 6379 + livenessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 6 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh 5 + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 2 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh 1 + resources: + limits: + cpu: "2" + memory: 8Gi + requests: + cpu: "100m" + memory: 1Gi + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + - name: redis-data + mountPath: /data + subPath: + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + volumes: + - name: start-scripts + configMap: + name: helm-redis-scripts + defaultMode: 0755 + - name: health + configMap: + name: helm-redis-health + defaultMode: 0755 + - name: config + configMap: + name: helm-redis-configuration + - name: redis-tmp-conf + emptyDir: { } + - name: redis-data + emptyDir: { } + diff --git a/agent-common/real_project/zjyd/old/k8s-srs.yaml b/agent-common/real_project/zjyd/old/k8s-srs.yaml new file mode 100644 index 0000000..0ecae1b --- /dev/null +++ b/agent-common/real_project/zjyd/old/k8s-srs.yaml @@ -0,0 +1,496 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: helm-live-srs-cm + namespace: zjyd + labels: + cmii.app: live-srs + cmii.type: live + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + helm.sh/chart: cmlc-live-srs-rtc-2.0.0 +data: + srs.rtc.conf: |- + listen 30935; + max_connections 4096; + srs_log_tank console; + srs_log_level info; + srs_log_file /home/srs.log; + daemon off; + http_api { + enabled on; + listen 1985; + crossdomain on; + } + stats { + network 0; + } + http_server { + enabled on; + listen 8080; + dir /home/hls; + } + srt_server { + enabled on; + listen 30556; + maxbw 
1000000000; + connect_timeout 4000; + peerlatency 600; + recvlatency 600; + } + rtc_server { + enabled on; + listen 30090; + candidate $CANDIDATE; + } + vhost __defaultVhost__ { + http_hooks { + enabled on; + on_publish http://helm-live-op-svc-v2:8080/hooks/on_push; + } + http_remux { + enabled on; + } + rtc { + enabled on; + rtmp_to_rtc on; + rtc_to_rtmp on; + keep_bframe off; + } + tcp_nodelay on; + min_latency on; + play { + gop_cache off; + mw_latency 100; + mw_msgs 10; + } + publish { + firstpkt_timeout 8000; + normal_timeout 4000; + mr on; + } + dvr { + enabled off; + dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4; + dvr_plan session; + } + hls { + enabled on; + hls_path /home/hls; + hls_fragment 10; + hls_window 60; + hls_m3u8_file [app]/[stream].m3u8; + hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts; + hls_cleanup on; + hls_entry_prefix http://111.2.224.59:8088; + } + } +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-srs-svc-exporter + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - name: rtmp + protocol: TCP + port: 30935 + targetPort: 30935 + nodePort: 30935 + - name: rtc + protocol: UDP + port: 30090 + targetPort: 30090 + nodePort: 30090 + - name: rtc-tcp + protocol: TCP + port: 30090 + targetPort: 30090 + nodePort: 30090 + - name: srt + protocol: UDP + port: 30556 + targetPort: 30556 + nodePort: 30556 + - name: api + protocol: TCP + port: 1985 + targetPort: 1985 + nodePort: 30557 + selector: + srs-role: rtc + type: NodePort + sessionAffinity: None + externalTrafficPolicy: Cluster + +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-srs-svc + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 + - name: api + protocol: TCP + port: 1985 + targetPort: 1985 + selector: + srs-role: rtc + type: ClusterIP + 
sessionAffinity: None + +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-srsrtc-svc + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - name: rtmp + protocol: TCP + port: 30935 + targetPort: 30935 + selector: + srs-role: rtc + type: ClusterIP + sessionAffinity: None + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: helm-live-srs-rtc + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + cmii.app: live-srs + cmii.type: live + helm.sh/chart: cmlc-live-srs-rtc-2.0.0 + srs-role: rtc +spec: + replicas: 1 + selector: + matchLabels: + srs-role: rtc + template: + metadata: + labels: + srs-role: rtc + spec: + volumes: + - name: srs-conf-file + configMap: + name: helm-live-srs-cm + items: + - key: srs.rtc.conf + path: docker.conf + defaultMode: 420 + - name: srs-vol + emptyDir: + sizeLimit: 8Gi + containers: + - name: srs-rtc + image: 192.168.10.3:8033/cmii/srs:v5.0.195 + ports: + - name: srs-rtmp + containerPort: 30935 + protocol: TCP + - name: srs-api + containerPort: 1985 + protocol: TCP + - name: srs-flv + containerPort: 8080 + protocol: TCP + - name: srs-webrtc + containerPort: 30090 + protocol: UDP + - name: srs-webrtc-tcp + containerPort: 30090 + protocol: TCP + - name: srs-srt + containerPort: 30556 + protocol: UDP + env: + - name: CANDIDATE + value: 111.2.224.59 + resources: + limits: + cpu: 2000m + memory: 4Gi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: srs-conf-file + mountPath: /usr/local/srs/conf/docker.conf + subPath: docker.conf + - name: srs-vol + mountPath: /home/dvr + subPath: zjyd/helm-live/dvr + - name: srs-vol + mountPath: /home/hls + subPath: zjyd/helm-live/hls + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + - name: oss-adaptor + image: 192.168.10.3:8033/cmii/cmii-srs-oss-adaptor:2023-SA + env: + - name: OSS_ENDPOINT + value: 
'http://192.168.10.2:9000' + - name: OSS_AK + value: cmii + - name: OSS_SK + value: 'B#923fC7mk' + - name: OSS_BUCKET + value: live-cluster-hls + - name: SRS_OP + value: 'http://helm-live-op-svc-v2:8080' + - name: MYSQL_ENDPOINT + value: 'helm-mysql:3306' + - name: MYSQL_USERNAME + value: k8s_admin + - name: MYSQL_PASSWORD + value: fP#UaH6qQ3)8 + - name: MYSQL_DATABASE + value: cmii_live_srs_op + - name: MYSQL_TABLE + value: live_segment + - name: LOG_LEVEL + value: info + - name: OSS_META + value: 'yes' + resources: + limits: + cpu: 2000m + memory: 4Gi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: srs-vol + mountPath: /cmii/share/hls + subPath: zjyd/helm-live/hls + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: { } + imagePullSecrets: + - name: harborsecret + affinity: { } + schedulerName: default-scheduler + serviceName: helm-live-srsrtc-svc + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: 0 + revisionHistoryLimit: 10 +--- +# live-srs部分 +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: helm-live-op-v2 + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + cmii.app: live-engine + cmii.type: live + helm.sh/chart: cmlc-live-live-op-2.0.0 + live-role: op-v2 +spec: + replicas: 1 + selector: + matchLabels: + live-role: op-v2 + template: + metadata: + labels: + live-role: op-v2 + spec: + volumes: + - name: srs-conf-file + configMap: + name: helm-live-op-cm-v2 + items: + - key: live.op.conf + path: bootstrap.yaml + defaultMode: 420 + containers: + - name: helm-live-op-v2 + image: 192.168.10.3:8033/cmii/cmii-live-operator:5.2.0 + ports: + - name: operator + containerPort: 8080 + protocol: TCP + resources: + limits: + cpu: 4800m + memory: 4Gi + requests: + cpu: 100m + memory: 256Mi + 
volumeMounts: + - name: srs-conf-file + mountPath: /cmii/bootstrap.yaml + subPath: bootstrap.yaml + livenessProbe: + httpGet: + path: /cmii/health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: { } + imagePullSecrets: + - name: harborsecret + affinity: { } + schedulerName: default-scheduler + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 25% + maxSurge: 25% + revisionHistoryLimit: 10 + progressDeadlineSeconds: 600 +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-op-svc-v2 + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 + nodePort: 30333 + selector: + live-role: op-v2 + type: NodePort + sessionAffinity: None +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-op-svc + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 + selector: + live-role: op + type: ClusterIP + sessionAffinity: None +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: helm-live-op-cm-v2 + namespace: zjyd + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + cmii.app: live-engine + cmii.type: live +data: + live.op.conf: |- + server: + port: 8080 + spring: + main: + allow-bean-definition-overriding: true + allow-circular-references: true + application: + name: cmii-live-operator + platform: + info: + name: cmii-live-operator + 
description: cmii-live-operator + version: 5.7.0 + scanPackage: com.cmii.live.op + cloud: + nacos: + config: + username: developer + password: N@cos14Good + server-addr: helm-nacos:8848 + extension-configs: + - data-id: cmii-live-operator.yml + group: 5.7.0 + refresh: true + shared-configs: + - data-id: cmii-backend-system.yml + group: 5.7.0 + refresh: true + discovery: + enabled: false + + live: + engine: + type: srs + endpoint: 'http://helm-live-srs-svc:1985' + proto: + rtmp: 'rtmp://111.2.224.59:30935' + rtsp: 'rtsp://111.2.224.59:30554' + srt: 'srt://111.2.224.59:30556' + flv: 'http://111.2.224.59:30500' + hls: 'http://111.2.224.59:30500' + rtc: 'webrtc://111.2.224.59:30090' + replay: 'https://111.2.224.59:30333' + minio: + endpoint: http://192.168.10.2:9000 + access-key: cmii + secret-key: B#923fC7mk + bucket: live-cluster-hls diff --git a/agent-common/real_project/zjyd/zjyd-app-yaml.zip b/agent-common/real_project/zjyd/zjyd-app-yaml.zip new file mode 100644 index 0000000..4312143 Binary files /dev/null and b/agent-common/real_project/zjyd/zjyd-app-yaml.zip differ diff --git a/agent-operator/real_project/zyga/operator.go b/agent-common/real_project/zyga/operator.go similarity index 100% rename from agent-operator/real_project/zyga/operator.go rename to agent-common/real_project/zyga/operator.go diff --git a/agent-common/utils/FileUtils.go b/agent-common/utils/FileUtils.go index eba833e..6c390b5 100755 --- a/agent-common/utils/FileUtils.go +++ b/agent-common/utils/FileUtils.go @@ -68,6 +68,33 @@ func AppendContentToFile(content string, targetFile string) bool { return true } +func AppendOverwriteListContentToFile(contentList []string, targetFile string) bool { + + err := os.Remove(targetFile) + if err != nil { + log.WarnF("[AppendOverwriteListContentToFile] - Error removing file: %s , error is %s", targetFile, err.Error()) + } + + // 打开文件用于追加。如果文件不存在,将会创建一个新文件。 + file, err := os.OpenFile(targetFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + 
log.ErrorF("[AppendOverwriteListContentToFile] - Error opening file: %s , error is %s", targetFile, err.Error()) + return false + } + defer file.Close() // 确保文件最终被关闭 + + // 写入内容到文件 + for _, contentLine := range contentList { + //bytes, _ := json.Marshal(contentLine) + if _, err := file.WriteString(contentLine + "\n"); err != nil { + log.ErrorF("[AppendOverwriteListContentToFile] - Error writing to file: %s , error is %s", targetFile, err.Error()) + return false + } + } + + return true +} + // AppendContentWithSplitLineToFile 专门为k8s的yaml文件设计的,在每次写入内容之前,先写入一行分隔符 func AppendContentWithSplitLineToFile(content string, targetFile string) bool { @@ -174,6 +201,10 @@ func ListAllFileInFolder(folderName string) ([]string, error) { return listAllFileInFolderWithFullPath(folderName, false) } +func ListAllFileInFolderWithFullPath(folderName string) ([]string, error) { + return listAllFileInFolderWithFullPath(folderName, true) +} + func listAllFileInFolderWithFullPath(folderName string, fullPath bool) ([]string, error) { files := make([]string, 0) err := filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error { diff --git a/agent-deploy/OctopusDeploy.go b/agent-deploy/OctopusDeploy.go index 2fc9cff..4a600ed 100755 --- a/agent-deploy/OctopusDeploy.go +++ b/agent-deploy/OctopusDeploy.go @@ -66,8 +66,8 @@ func OctopusDeploy() { gzipFilePrefix := "/root/octopus_image/xjyd/" frontendImageVersionMap, backendImageVersionMap, _ := image2.GzipFolderPathToCmiiImageTagMaps(gzipFilePrefix) - backendDeploy(common, backendImageVersionMap) - frontendDeploy(common, frontendImageVersionMap) + doDeployBackend(common, backendImageVersionMap) + doDeployFrontend(common, frontendImageVersionMap) d_app.SRSDeploy(common) } @@ -91,8 +91,8 @@ func CmiiAppDeploy() { gzipFilePrefix := "/root/octopus_image/xjyd/" frontendImageVersionMap, backendImageVersionMap, _ := image2.GzipFolderPathToCmiiImageTagMaps(gzipFilePrefix) - backendDeploy(common, backendImageVersionMap) - 
frontendDeploy(common, frontendImageVersionMap) + doDeployBackend(common, backendImageVersionMap) + doDeployFrontend(common, frontendImageVersionMap) //utils.BeautifulPrint(frontendImageVersionMap) //configMapDeploy(common) @@ -100,34 +100,16 @@ func CmiiAppDeploy() { } -var IgnoreCmiiBackendAppName = map[string]string{ - "cmii-uav-grid-datasource": "0", - "cmii-uav-grid-manage": "", - "cmii-uav-grid-engine": "", - "cmii-uav-kpi-monitor": "", - "cmii-uav-gis-server": "", - "cmii-app-release": "", - "cmii-uav-autowaypoint": "", - "cmii-uav-integration": "", - "cmii-uav-developer": "", - "cmii-open-gateway": "", - "cmii-uav-brain": "", - "cmii-uav-data-post-process": "", - "cmii-uav-multilink": "", - "cmii-uav-alarm": "", - "cmii-uav-tower": "", - "cmii-uav-clusters": "", - "cmii-uav-depotautoreturn": "", -} - -func backendDeploy(common *z_dep.CommonEnvironmentConfig, backendImageVersionMap map[string]string) { +func doDeployBackend(common *z_dep.CommonEnvironmentConfig, backendImageVersionMap map[string]string) { os.Remove(z_dep.BackendApplyFilePath) for appName, tag := range backendImageVersionMap { d_app.DefaultCmiiBackendConfig.AppName = appName d_app.DefaultCmiiBackendConfig.ImageTag = tag - _, ok := IgnoreCmiiBackendAppName[appName] + + // ignore some app + _, ok := d_app.IgnoreCmiiBackendAppName[appName] if ok { d_app.DefaultCmiiBackendConfig.Replicas = "0" } else { @@ -138,7 +120,7 @@ func backendDeploy(common *z_dep.CommonEnvironmentConfig, backendImageVersionMap } } -func frontendDeploy(common *z_dep.CommonEnvironmentConfig, frontendImageVersionMap map[string]string) { +func doDeployFrontend(common *z_dep.CommonEnvironmentConfig, frontendImageVersionMap map[string]string) { os.Remove(z_dep.FrontendApplyFilePath) @@ -147,7 +129,14 @@ func frontendDeploy(common *z_dep.CommonEnvironmentConfig, frontendImageVersionM for appName, tag := range frontendImageVersionMap { d_app.DefaultCmiiFrontendConfig.AppName = appName d_app.DefaultCmiiFrontendConfig.ImageTag = 
tag - d_app.DefaultCmiiFrontendConfig.Replicas = "1" + + // ignore some app + _, ok := d_app.IgnoreCmiiFrontendAppName[appName] + if ok { + d_app.DefaultCmiiFrontendConfig.Replicas = "0" + } else { + d_app.DefaultCmiiFrontendConfig.Replicas = "1" + } value, ok := d_app.FrontendShortNameMaps[appName] if !ok { @@ -201,6 +190,12 @@ func CmiiEnvironmentDeploy(isCompleteDeploy bool, commonEnv *z_dep.CommonEnviron // generate new apply file for specific environment if isCompleteDeploy { + // dashboard + a_dashboard.K8sDashboardDeploy(commonEnv) + // nfs + b_nfs.NFSDeploy(commonEnv) + b_nfs.NFSTestDeploy(commonEnv) + // pvc c_middle.PVCDeploy(commonEnv) @@ -212,16 +207,15 @@ func CmiiEnvironmentDeploy(isCompleteDeploy bool, commonEnv *z_dep.CommonEnviron cmiiEnvConfig.RabbitMQConfig.MidRabbitMQDeploy(commonEnv) cmiiEnvConfig.NacosConfig.MidNacosDeploy(commonEnv) - configMapDeploy(commonEnv) - - d_app.DefaultIngressConfig.IngressDeploy(commonEnv) - } // frontend - frontendDeploy(commonEnv, frontendImageVersionMap) + configMapDeploy(commonEnv) + d_app.DefaultIngressConfig.IngressDeploy(commonEnv) + doDeployFrontend(commonEnv, frontendImageVersionMap) + // backend - backendDeploy(commonEnv, backendImageVersionMap) + doDeployBackend(commonEnv, backendImageVersionMap) // srs cmiiEnvConfig.CmiiSrsConfig.SRSDeploy(commonEnv) @@ -239,10 +233,10 @@ func CmiiNewAppDeploy(commonEnv *z_dep.CommonEnvironmentConfig, backendImageVers d_app.DefaultIngressConfig.IngressDeploy(commonEnv) // frontend - frontendDeploy(commonEnv, frontendImageVersionMap) + doDeployFrontend(commonEnv, frontendImageVersionMap) // backend - backendDeploy(commonEnv, backendImageVersionMap) + doDeployBackend(commonEnv, backendImageVersionMap) } func getCmiiEnvConfigurationFromNamespace(namespace string) *e_cmii.CmiiEnvConfig { diff --git a/agent-deploy/b_nfs/DeployNFS.go b/agent-deploy/b_nfs/DeployNFS.go index 0fad431..0a1c2bb 100755 --- a/agent-deploy/b_nfs/DeployNFS.go +++ b/agent-deploy/b_nfs/DeployNFS.go @@ 
-14,6 +14,12 @@ type NfsDeployConfig struct { } func NFSDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool { + + if commonEnv.NFSServerIP == "" { + log.Error("Nfs server ip is nil") + return false + } + return commonEnv.ParseCommonEnvToApplyFile(CmiiNfsTemplate, z_dep.NfsApplyFilePath) } diff --git a/agent-deploy/b_nfs/TemplateNFS.go b/agent-deploy/b_nfs/TemplateNFS.go index c6767e8..0bbf433 100755 --- a/agent-deploy/b_nfs/TemplateNFS.go +++ b/agent-deploy/b_nfs/TemplateNFS.go @@ -82,8 +82,6 @@ metadata: # replace with namespace where provisioner is deployed namespace: kube-system #与RBAC文件中的namespace保持一致 spec: - imagePullSecrets: - - name: harborsecret replicas: 1 selector: matchLabels: @@ -95,6 +93,8 @@ spec: labels: app: nfs-client-provisioner spec: + imagePullSecrets: + - name: harborsecret serviceAccountName: nfs-client-provisioner containers: - name: nfs-client-provisioner diff --git a/agent-deploy/b_nfs/TemplateNFSTest.go b/agent-deploy/b_nfs/TemplateNFSTest.go index 7feda30..51bad95 100755 --- a/agent-deploy/b_nfs/TemplateNFSTest.go +++ b/agent-deploy/b_nfs/TemplateNFSTest.go @@ -21,13 +21,13 @@ metadata: name: test-pod spec: imagePullSecrets: - - name: harborsecret + - name: harborsecret containers: - name: test-pod {{- if .HarborPort }} - image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/busybox:latest + image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/busybox:latest {{- else }} - image: {{ .HarborIPOrCustomImagePrefix }}busybox:latest + image: {{ .HarborIPOrCustomImagePrefix }}busybox:latest {{- end }} command: - "/bin/sh" diff --git a/agent-deploy/c_middle/CmiiEmqxTemplate.go b/agent-deploy/c_middle/CmiiEmqxTemplate.go index 2d372a2..256cd58 100755 --- a/agent-deploy/c_middle/CmiiEmqxTemplate.go +++ b/agent-deploy/c_middle/CmiiEmqxTemplate.go @@ -45,16 +45,16 @@ metadata: app.kubernetes.io/version: {{ .TagVersion }} data: emqx_auth_mnesia.conf: |- - auth.mnesia.password_hash = sha256 + auth.mnesia.password_hash = sha256 
- # clientid 认证数据 - # auth.client.1.clientid = admin - # auth.client.1.password = 4YPk*DS%+5 + # clientid 认证数据 + # auth.client.1.clientid = admin + # auth.client.1.password = 4YPk*DS%+5 - ## username 认证数据 - auth.user.1.username = admin - auth.user.1.password = {{ .EmqxPassword }} - auth.user.2.username = cmlc + ## username 认证数据 + auth.user.1.username = admin + auth.user.1.password = {{ .EmqxPassword }} + auth.user.2.username = cmlc auth.user.2.password = {{ .EmqxPassword }} acl.conf: |- diff --git a/agent-deploy/d_app/CmiiImageConfig.go b/agent-deploy/d_app/CmiiImageConfig.go index 7bfe660..e8121a5 100755 --- a/agent-deploy/d_app/CmiiImageConfig.go +++ b/agent-deploy/d_app/CmiiImageConfig.go @@ -77,6 +77,38 @@ var CmiiFrontendAppMap = map[string]string{ "cmii-uav-platform-uas": "5.2.0", } +var IgnoreCmiiBackendAppName = map[string]string{ + "cmii-uav-grid-datasource": "0", + "cmii-uav-grid-manage": "0", + "cmii-uav-grid-engine": "0", + "cmii-uav-kpi-monitor": "0", + "cmii-uav-gis-server": "0", + "cmii-app-release": "0", + "cmii-uav-autowaypoint": "0", + "cmii-uav-integration": "0", + "cmii-uav-developer": "0", + "cmii-open-gateway": "0", + "cmii-uav-brain": "0", + "cmii-uav-data-post-process": "0", + "cmii-uav-multilink": "0", + "cmii-uav-alarm": "0", + "cmii-uav-tower": "0", + "cmii-uav-clusters": "0", + "cmii-uav-depotautoreturn": "0", + "cmii-uas-lifecycle": "0", + "cmii-uas-gateway": "0", +} + +var IgnoreCmiiFrontendAppName = map[string]string{ + "cmii-uav-platform-hljtt": "0", + "cmii-uav-platform-jiangsuwenlv": "0", + "cmii-uav-platform-qinghaitourism": "0", + "cmii-uav-platform-qingdao": "0", + "cmii-uav-platform-uasms": "0", + "cmii-uav-platform-uas": "0", + "cmii-uav-platform-seniclive": "0", +} + var CmiiMiddlewareNameMap = map[string]string{ "helm-nacos": "single", "helm-emqxs": "single", @@ -99,98 +131,98 @@ var CmiiGISAppMap = map[string]string{ } var MiddlewareAmd64 = []string{ - "bitnami/redis:6.2.6-debian-10-r0", - 
"bitnami/redis:6.2.14-debian-11-r1", - "bitnami/mysql:8.0.35-debian-11-r1", - "bitnami/mysql:8.1.0-debian-11-r42", - "simonrupf/chronyd:0.4.3", - "bitnami/bitnami-shell:10-debian-10-r140", - "bitnami/bitnami-shell:11-debian-11-r136", - "bitnami/rabbitmq:3.9.12-debian-10-r3", - "bitnami/rabbitmq:3.11.26-debian-11-r2", - "ossrs/srs:v4.0.136", - "ossrs/srs:v5.0.195", - "ossrs/srs:v4.0-r3", - "emqx/emqx:4.4.9", - "emqx/emqx:5.5.1", - "nacos/nacos-server:v2.1.2", - "nacos/nacos-server:v2.1.2-slim", - "mongo:5.0", - "rabbitmq:3.9-management", - "bitnami/minio:2022.5.4", - "bitnami/minio:2023.5.4", - "kubernetesui/dashboard:v2.0.1", - "kubernetesui/metrics-scraper:v1.0.4", - "nginx:1.21.3", - "redis:6.0.20-alpine", - "dyrnq/nfs-subdir-external-provisioner:v4.0.2", - "jerrychina2020/rke-tools:v0.175-linux", - "jerrychina2020/rke-tools:v0.175", - "busybox:latest", + "docker.107421.xyz/bitnami/redis:6.2.6-debian-10-r0", + "docker.107421.xyz/bitnami/redis:6.2.14-debian-11-r1", + "docker.107421.xyz/bitnami/mysql:8.0.35-debian-11-r1", + "docker.107421.xyz/bitnami/mysql:8.1.0-debian-11-r42", + "docker.107421.xyz/simonrupf/chronyd:0.4.3", + "docker.107421.xyz/bitnami/bitnami-shell:10-debian-10-r140", + "docker.107421.xyz/bitnami/bitnami-shell:11-debian-11-r136", + "docker.107421.xyz/bitnami/rabbitmq:3.9.12-debian-10-r3", + "docker.107421.xyz/bitnami/rabbitmq:3.11.26-debian-11-r2", + "docker.107421.xyz/ossrs/srs:v4.0.136", + "docker.107421.xyz/ossrs/srs:v5.0.195", + "docker.107421.xyz/ossrs/srs:v4.0-r3", + "docker.107421.xyz/emqx/emqx:4.4.19", + "docker.107421.xyz/emqx/emqx:5.5.1", + "docker.107421.xyz/nacos/nacos-server:v2.1.2", + "docker.107421.xyz/nacos/nacos-server:v2.1.2-slim", + "docker.107421.xyz/library/mongo:5.0", + "docker.107421.xyz/library/rabbitmq:3.9-management", + "docker.107421.xyz/bitnami/minio:2022.5.4", + "docker.107421.xyz/bitnami/minio:2023.5.4", + "docker.107421.xyz/kubernetesui/dashboard:v2.0.1", + "docker.107421.xyz/kubernetesui/metrics-scraper:v1.0.4", + 
"docker.107421.xyz/library/nginx:1.21.3", + "docker.107421.xyz/library/redis:6.0.20-alpine", + "docker.107421.xyz/dyrnq/nfs-subdir-external-provisioner:v4.0.2", + "docker.107421.xyz/jerrychina2020/rke-tools:v0.175-linux", + "docker.107421.xyz/jerrychina2020/rke-tools:v0.175", + "docker.107421.xyz/library/busybox:latest", } var Rancher1204Amd64 = []string{ - "rancher/backup-restore-operator:v1.0.3", - "rancher/calico-cni:v3.17.2", - "rancher/calico-ctl:v3.17.2", - "rancher/calico-kube-controllers:v3.17.2", - "rancher/calico-node:v3.17.2", - "rancher/calico-pod2daemon-flexvol:v3.17.2", - "rancher/cis-operator:v1.0.3", - "rancher/cluster-proportional-autoscaler:1.7.1", - "rancher/coredns-coredns:1.8.0", - "rancher/coreos-etcd:v3.4.14-rancher1", - "rancher/coreos-kube-state-metrics:v1.9.7", - "rancher/coreos-prometheus-config-reloader:v0.39.0", - "rancher/coreos-prometheus-operator:v0.39.0", - "rancher/externalip-webhook:v0.1.6", - "rancher/flannel-cni:v0.3.0-rancher6", - "rancher/coreos-flannel:v0.13.0-rancher1", - "rancher/fleet-agent:v0.3.4", - "rancher/fleet:v0.3.4", - "rancher/fluentd:v0.1.24", - "rancher/grafana-grafana:7.1.5", - "rancher/hyperkube:v1.20.4-rancher1", - "rancher/jimmidyson-configmap-reload:v0.3.0", - "rancher/k8s-dns-dnsmasq-nanny:1.15.2", - "rancher/k8s-dns-kube-dns:1.15.2", - "rancher/k8s-dns-node-cache:1.15.13", - "rancher/k8s-dns-sidecar:1.15.2", - "rancher/klipper-lb:v0.1.2", - "rancher/kube-api-auth:v0.1.4", - "rancher/kubectl:v1.20.4", - "rancher/kubernetes-external-dns:v0.7.3", - "rancher/cluster-proportional-autoscaler:1.8.1", - "rancher/library-busybox:1.31.1", - "rancher/library-busybox:1.32.1", - "rancher/library-nginx:1.19.2-alpine", - "rancher/library-traefik:1.7.19", - "rancher/local-path-provisioner:v0.0.11", - "rancher/local-path-provisioner:v0.0.14", - "rancher/local-path-provisioner:v0.0.19", - "rancher/log-aggregator:v0.1.7", - "rancher/istio-kubectl:1.5.10", - "rancher/metrics-server:v0.4.1", - 
"rancher/configmap-reload:v0.3.0-rancher4", - "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", - "rancher/nginx-ingress-controller:nginx-0.43.0-rancher1", - "rancher/opa-gatekeeper:v3.1.0-beta.7", - "rancher/openzipkin-zipkin:2.14.2", - "rancher/pause:3.2", - "rancher/plugins-docker:18.09", - "rancher/prom-alertmanager:v0.21.0", - "rancher/prom-node-exporter:v1.0.1", - "rancher/prom-prometheus:v2.18.2", - "rancher/prometheus-auth:v0.2.1", - "rancher/rancher-agent:v2.5.7", - "rancher/rancher-webhook:v0.1.0-beta9", - "rancher/rancher:v2.5.7", - "rancher/rke-tools:v0.1.72", - "rancher/security-scan:v0.1.14", - "rancher/security-scan:v0.2.2", - "rancher/shell:v0.1.6", - "rancher/sonobuoy-sonobuoy:v0.16.3", - "rancher/system-upgrade-controller:v0.6.2", + "docker.107421.xyz/rancher/backup-restore-operator:v1.0.3", + "docker.107421.xyz/rancher/calico-cni:v3.17.2", + "docker.107421.xyz/rancher/calico-ctl:v3.17.2", + "docker.107421.xyz/rancher/calico-kube-controllers:v3.17.2", + "docker.107421.xyz/rancher/calico-node:v3.17.2", + "docker.107421.xyz/rancher/calico-pod2daemon-flexvol:v3.17.2", + "docker.107421.xyz/rancher/cis-operator:v1.0.3", + "docker.107421.xyz/rancher/cluster-proportional-autoscaler:1.7.1", + "docker.107421.xyz/rancher/coredns-coredns:1.8.0", + "docker.107421.xyz/rancher/coreos-etcd:v3.4.14-rancher1", + "docker.107421.xyz/rancher/coreos-kube-state-metrics:v1.9.7", + "docker.107421.xyz/rancher/coreos-prometheus-config-reloader:v0.39.0", + "docker.107421.xyz/rancher/coreos-prometheus-operator:v0.39.0", + "docker.107421.xyz/rancher/externalip-webhook:v0.1.6", + "docker.107421.xyz/rancher/flannel-cni:v0.3.0-rancher6", + "docker.107421.xyz/rancher/coreos-flannel:v0.13.0-rancher1", + "docker.107421.xyz/rancher/fleet-agent:v0.3.4", + "docker.107421.xyz/rancher/fleet:v0.3.4", + "docker.107421.xyz/rancher/fluentd:v0.1.24", + "docker.107421.xyz/rancher/grafana-grafana:7.1.5", + "docker.107421.xyz/rancher/hyperkube:v1.20.4-rancher1", + 
"docker.107421.xyz/rancher/jimmidyson-configmap-reload:v0.3.0", + "docker.107421.xyz/rancher/k8s-dns-dnsmasq-nanny:1.15.2", + "docker.107421.xyz/rancher/k8s-dns-kube-dns:1.15.2", + "docker.107421.xyz/rancher/k8s-dns-node-cache:1.15.13", + "docker.107421.xyz/rancher/k8s-dns-sidecar:1.15.2", + "docker.107421.xyz/rancher/klipper-lb:v0.1.2", + "docker.107421.xyz/rancher/kube-api-auth:v0.1.4", + "docker.107421.xyz/rancher/kubectl:v1.20.4", + "docker.107421.xyz/rancher/kubernetes-external-dns:v0.7.3", + "docker.107421.xyz/rancher/cluster-proportional-autoscaler:1.8.1", + "docker.107421.xyz/rancher/library-busybox:1.31.1", + "docker.107421.xyz/rancher/library-busybox:1.32.1", + "docker.107421.xyz/rancher/library-nginx:1.19.2-alpine", + "docker.107421.xyz/rancher/library-traefik:1.7.19", + "docker.107421.xyz/rancher/local-path-provisioner:v0.0.11", + "docker.107421.xyz/rancher/local-path-provisioner:v0.0.14", + "docker.107421.xyz/rancher/local-path-provisioner:v0.0.19", + "docker.107421.xyz/rancher/log-aggregator:v0.1.7", + "docker.107421.xyz/rancher/istio-kubectl:1.5.10", + "docker.107421.xyz/rancher/metrics-server:v0.4.1", + "docker.107421.xyz/rancher/configmap-reload:v0.3.0-rancher4", + "docker.107421.xyz/rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "docker.107421.xyz/rancher/nginx-ingress-controller:nginx-0.43.0-rancher1", + "docker.107421.xyz/rancher/opa-gatekeeper:v3.1.0-beta.7", + "docker.107421.xyz/rancher/openzipkin-zipkin:2.14.2", + "docker.107421.xyz/rancher/pause:3.2", + "docker.107421.xyz/rancher/plugins-docker:18.09", + "docker.107421.xyz/rancher/prom-alertmanager:v0.21.0", + "docker.107421.xyz/rancher/prom-node-exporter:v1.0.1", + "docker.107421.xyz/rancher/prom-prometheus:v2.18.2", + "docker.107421.xyz/rancher/prometheus-auth:v0.2.1", + "docker.107421.xyz/rancher/rancher-agent:v2.5.7", + "docker.107421.xyz/rancher/rancher-webhook:v0.1.0-beta9", + "docker.107421.xyz/rancher/rancher:v2.5.7", + "docker.107421.xyz/rancher/rke-tools:v0.1.72", 
+ "docker.107421.xyz/rancher/security-scan:v0.1.14", + "docker.107421.xyz/rancher/security-scan:v0.2.2", + "docker.107421.xyz/rancher/shell:v0.1.6", + "docker.107421.xyz/rancher/sonobuoy-sonobuoy:v0.16.3", + "docker.107421.xyz/rancher/system-upgrade-controller:v0.6.2", } var CmiiSRSImageList = []string{ diff --git a/agent-deploy/d_app/DeployCmiiApp.go b/agent-deploy/d_app/DeployCmiiApp.go index df4f289..5a51d77 100755 --- a/agent-deploy/d_app/DeployCmiiApp.go +++ b/agent-deploy/d_app/DeployCmiiApp.go @@ -152,6 +152,11 @@ func (srsConfig *CmiiSrsConfig) SRSDeploy(commonEnv *z_dep.CommonEnvironmentConf // copy utils.CopySameFields(commonEnv, srsConfig) + if srsConfig.MinioInnerIP == "" { + log.Error("minio inner ip is nil !") + return false + } + if !z_dep.ParseEnvToApplyFile(srsConfig, CmiiSrsTemplate, z_dep.SRSApplyFilePath) { return false } diff --git a/agent-deploy/d_app/FrontendConfigMap.go b/agent-deploy/d_app/FrontendConfigMap.go index 35ac83a..9bb2592 100755 --- a/agent-deploy/d_app/FrontendConfigMap.go +++ b/agent-deploy/d_app/FrontendConfigMap.go @@ -30,6 +30,7 @@ var FrontendShortNameMaps = map[string]string{ "cmii-uav-platform-visualization": "visualization", "cmii-uav-platform-uasms": "uasms", "cmii-uav-platform-uas": "uas", + "cmii-uav-platform-dispatchh5": "dispatchh5", } var FrontendClientIdMaps = map[string]string{ @@ -62,4 +63,5 @@ var FrontendClientIdMaps = map[string]string{ "cmii-uav-platform-hljtt": "empty", "cmii-uav-platform-uasms": "empty", "cmii-uav-platform-uas": "empty", + "cmii-uav-platform-dispatchh5": "empty", } diff --git a/agent-deploy/d_app/TemplateCmiiBackend.go b/agent-deploy/d_app/TemplateCmiiBackend.go index 503731b..e160a81 100755 --- a/agent-deploy/d_app/TemplateCmiiBackend.go +++ b/agent-deploy/d_app/TemplateCmiiBackend.go @@ -35,7 +35,11 @@ spec: - key: uavcloud.env operator: In values: + {{- if .TenantEnv }} - {{ .TenantEnv }} + {{- else }} + - {{ .Namespace }} + {{- end }} imagePullSecrets: - name: harborsecret containers: 
diff --git a/agent-deploy/d_app/TemplateIngressConfigMap.go b/agent-deploy/d_app/TemplateIngressConfigMap.go index dd5fe40..f4a73a7 100755 --- a/agent-deploy/d_app/TemplateIngressConfigMap.go +++ b/agent-deploy/d_app/TemplateIngressConfigMap.go @@ -134,8 +134,9 @@ spec: servicePort: 8080 {{- end }} {{- else }} + {{- $tenantEnv := .Namespace }} {{- range $key, $value := .BackendImageVersionMap }} - - host: {{ $key }}.uavcloud-{{ .Namespace }}.io + - host: {{ $key }}.uavcloud-{{ $tenantEnv }}.io http: paths: - path: / diff --git a/agent-deploy/z_dep/G.go b/agent-deploy/z_dep/G.go index ec1ce50..6e6fe22 100755 --- a/agent-deploy/z_dep/G.go +++ b/agent-deploy/z_dep/G.go @@ -124,7 +124,7 @@ func ParseEnvToApplyFile(environment any, applyTemplate string, applyFilePath st var result bytes.Buffer err = tmpl.Execute(&result, environment) if err != nil { - log.ErrorF("template execute error: %v", err) + log.ErrorF("[ParseEnvToApplyFile] - [%s] template execute error: %v", applyFilePath, err) return false } diff --git a/agent-go/a_executor/BaseFunction.go b/agent-go/a_executor/BaseFunction.go index 8cbf175..ed0d71d 100755 --- a/agent-go/a_executor/BaseFunction.go +++ b/agent-go/a_executor/BaseFunction.go @@ -868,6 +868,7 @@ func (op *AgentOsOperator) installDockerExec(args []string) (bool, []string) { ok, log4 := HardCodeCommandExecutor("apt-cache madison docker-ce | grep 20.10.20 | awk '{print$3}'") if ok && log4 != nil && len(log4) > 0 { specificDockerVersion = strings.TrimSpace(log4[0]) + fmt.Println("get docker version from online => " + specificDockerVersion) } log.InfoF("需要安装的docker版本为 => %s", specificDockerVersion) diff --git a/agent-go/a_executor/K8sFunction.go b/agent-go/a_executor/K8sFunction.go index 14e4fe0..c1fd92e 100755 --- a/agent-go/a_executor/K8sFunction.go +++ b/agent-go/a_executor/K8sFunction.go @@ -163,15 +163,15 @@ func K8sDeploymentUpdateTag(supreme, appName, newTag string) (bool, string) { tagVersion = strings.Split(newTag, "-")[0] } envList := 
container.Env - for _, envVar := range envList { + for index, envVar := range envList { if envVar.Name == "IMAGE_VERSION" { - envVar.Value = tagVersion + envList[index].Value = tagVersion } if envVar.Name == "BIZ_CONFIG_GROUP" { - envVar.Value = tagVersion + envList[index].Value = tagVersion } if envVar.Name == "SYS_CONFIG_GROUP" { - envVar.Value = tagVersion + envList[index].Value = tagVersion } } log.DebugF("[DeploymentUpdateTag] - update env IMAGE_VERSION to [%s]", tagVersion) diff --git a/agent-operator/CmiiDeployOperator.go b/agent-operator/CmiiDeployOperator.go index 027452e..2518261 100755 --- a/agent-operator/CmiiDeployOperator.go +++ b/agent-operator/CmiiDeployOperator.go @@ -5,15 +5,18 @@ import ( "os" "path/filepath" "strings" + image2 "wdd.io/agent-common/image" "wdd.io/agent-common/utils" agentdeploy "wdd.io/agent-deploy" "wdd.io/agent-deploy/z_dep" "wdd.io/agent-operator/image" ) +const DeployFilePrefix = "/home/wdd/IdeaProjects/ProjectOctopus/agent-common/real_project/" + func CmiiEnvDeploy(deployCommonEnv *z_dep.CommonEnvironmentConfig, shouldDoCompleteDeploy bool, backupFromEnv string) { - folderPrefix := "/home/wdd/IdeaProjects/ProjectOctopus/agent-deploy/" + deployCommonEnv.Namespace + "/" + folderPrefix := DeployFilePrefix + deployCommonEnv.Namespace + "/" tenantEnv := deployCommonEnv.Namespace // uavcloud-devflight ==> devflight @@ -29,6 +32,7 @@ func CmiiEnvDeploy(deployCommonEnv *z_dep.CommonEnvironmentConfig, shouldDoCompl // assign folder prefix deployCommonEnv.ApplyFilePrefix = folderPrefix + deployCommonEnv.TenantEnv = tenantEnv var backendMap map[string]string var frontendMap map[string]string @@ -72,8 +76,60 @@ func CmiiEnvDeploy(deployCommonEnv *z_dep.CommonEnvironmentConfig, shouldDoCompl } +func CmiiEnvDeployOffline(deployCommonEnv *z_dep.CommonEnvironmentConfig, shouldDoCompleteDeploy bool, allCmiiImageList []string) { + folderPrefix := DeployFilePrefix + deployCommonEnv.Namespace + "/" + + tenantEnv := deployCommonEnv.Namespace + 
// uavcloud-devflight ==> devflight + // uavcloud-dev ==> dev + if strings.Contains(tenantEnv, "-") { + split := strings.Split(tenantEnv, "-") + tenantEnv = split[len(split)-1] + } else { + // demo ==> "" + // cqlyj ==> "" + tenantEnv = "" + } + + // assign folder prefix + deployCommonEnv.ApplyFilePrefix = folderPrefix + deployCommonEnv.TenantEnv = tenantEnv + + var backendMap map[string]string + var frontendMap map[string]string + var srsMap map[string]string + + // 输出特定版本的Tag + cmiiImageVersionMap := image2.CmiiImageMapFromImageFullNameList(allCmiiImageList) + frontendMap, backendMap, srsMap = image2.FrontendBackendSrsImageMapFromCmiiImageMap(cmiiImageVersionMap) + + utils.BeautifulPrintWithTitle(backendMap, "backendMap") + utils.BeautifulPrintWithTitle(frontendMap, "frontendMap") + utils.BeautifulPrintWithTitle(srsMap, "srsMap") + + // get the apply file path + deployCommonEnv.GenerateApplyFilePath() + + // do generate all application files + // generate and get all old stuff + agentdeploy.CmiiEnvironmentDeploy(shouldDoCompleteDeploy, deployCommonEnv, backendMap, frontendMap) + + // test + //GetNodeWideByKubectl(deployNamespace) + + // clear old apply file + //clearOldApplyStuff(common, shouldDoCompleteDeploy) + + // apply new app + //applyNewAppStuff(common, shouldDoCompleteDeploy) + + fmt.Println() + fmt.Println("-------------------- all done ---------------------") + fmt.Println() +} + func CmiiNewAppDeploy(deployCommonEnv *z_dep.CommonEnvironmentConfig, newAppNamespace string) { - folderPrefix := "/home/wdd/IdeaProjects/ProjectOctopus/agent-deploy/" + deployCommonEnv.Namespace + "/" + folderPrefix := DeployFilePrefix + deployCommonEnv.Namespace + "/" tenantEnv := deployCommonEnv.Namespace // uavcloud-devflight ==> devflight diff --git a/agent-operator/CmiiDeployOperator_test.go b/agent-operator/CmiiDeployOperator_test.go index 31e3228..a5bd3bf 100755 --- a/agent-operator/CmiiDeployOperator_test.go +++ b/agent-operator/CmiiDeployOperator_test.go @@ -3,6 +3,7 
@@ package main import ( "testing" image2 "wdd.io/agent-common/image" + "wdd.io/agent-common/real_project/zhejianyidong_erjipingtai" "wdd.io/agent-deploy/z_dep" ) @@ -44,6 +45,59 @@ func TestCmiiEnvDeploy_ChongQingSanHua(t *testing.T) { } +func TestCmiiEnvDeploy_LiuXiTongGan(t *testing.T) { + + // chongqing sanhua + commonEnv := &z_dep.CommonEnvironmentConfig{ + WebIP: "10.250.0.200", + WebPort: "8888", + HarborIPOrCustomImagePrefix: "10.250.0.200", + HarborPort: "8033", + Namespace: "bjtg", + TagVersion: "5.6.0", + NFSServerIP: "10.250.0.200", + } + + CmiiEnvDeploy(commonEnv, true, demo) + +} + +func TestCmiiEnvDeploy_ZheJiangYiDongErJiPingTai(t *testing.T) { + + // 浙江移动二级平台 + commonEnv := &z_dep.CommonEnvironmentConfig{ + WebIP: "111.2.224.59", + WebPort: "8088", + HarborIPOrCustomImagePrefix: "192.168.10.3", + HarborPort: "8033", + Namespace: "zjyd", + TagVersion: "5.7.0", + NFSServerIP: "192.168.10.3", + MinioInnerIP: "192.168.10.2", + } + + CmiiEnvDeployOffline(commonEnv, true, zhejianyidong_erjipingtai.Cmii570ImageList) + +} + +func TestCmiiEnvDeploy_JiangSuNanTong(t *testing.T) { + + // 江苏南通 + commonEnv := &z_dep.CommonEnvironmentConfig{ + WebIP: "111.2.224.59", + WebPort: "8088", + HarborIPOrCustomImagePrefix: "192.168.10.3", + HarborPort: "8033", + Namespace: "zjyd", + TagVersion: "5.7.0", + NFSServerIP: "192.168.10.3", + MinioInnerIP: "192.168.10.2", + } + + CmiiEnvDeployOffline(commonEnv, true, zhejianyidong_erjipingtai.Cmii570ImageList) + +} + func TestCmiiNewAppDeploy(t *testing.T) { deployNamespace := devOperation diff --git a/agent-operator/CmiiK8sOperator.go b/agent-operator/CmiiK8sOperator.go index 029a60c..ac930ae 100755 --- a/agent-operator/CmiiK8sOperator.go +++ b/agent-operator/CmiiK8sOperator.go @@ -11,14 +11,14 @@ import ( ) var DefaultCmiiOperator = CmiiK8sOperator{} -var updateLogPath = "/home/wdd/IdeaProjects/ProjectOctopus/agent-operator/log/cmii-update-log.txt" +var UpdateLogPath = 
"/home/wdd/IdeaProjects/ProjectOctopus/agent-operator/log/cmii-update-log.txt" func init() { switch runtime.GOOS { case "linux": - updateLogPath = "/home/wdd/IdeaProjects/ProjectOctopus/agent-operator/log/cmii-update-log.txt" + UpdateLogPath = "/home/wdd/IdeaProjects/ProjectOctopus/agent-operator/log/cmii-update-log.txt" case "windows": - updateLogPath = "C:\\Users\\wddsh\\Documents\\IdeaProjects\\ProjectOctopus\\agent-operator\\log\\cmii-update-log.txt" + UpdateLogPath = "C:\\Users\\wddsh\\Documents\\IdeaProjects\\ProjectOctopus\\agent-operator\\log\\cmii-update-log.txt" } } @@ -303,11 +303,12 @@ func UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag string) (updateOK boo if cmiiDeploymentInterface == nil { return updateOK, oldImageTag, newImageTag } + // check if need to update oldImageTag = cmiiDeploymentInterface.ImageTag if oldImageTag == newTag { log.DebugF("[UpdateCmiiDeploymentImageTag] - [%s] [%s] image tag are the same ! no need to update !", cmiiEnv, appName) - // restart + // restart deployment if DefaultCmiiOperator.DeploymentRestart(cmiiEnv, appName) { return true, oldImageTag, oldImageTag } else { @@ -329,8 +330,8 @@ func UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag string) (updateOK boo return false, oldImageTag, newImageTag } - // append log - utils.AppendContentToFile(content, updateLogPath) + // append update log + utils.AppendContentToFile(content, UpdateLogPath) // re-get from env time.Sleep(time.Second) @@ -340,6 +341,7 @@ func UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag string) (updateOK boo return false, oldImageTag, newImageTag } + // result return true, oldImageTag, deploy.ImageTag } @@ -377,7 +379,7 @@ func UpdateCmiiImageTagFromNameTagList(cmiiEnv string, nameTagList []string) (re func RollBackCmiiDeploymentFromUpdateLog(updateLog string) bool { - //if !executor.BasicFindContentInFile(updateLog, updateLogPath) { + //if !executor.BasicFindContentInFile(updateLog, UpdateLogPath) { // 
log.ErrorF("[RollBackCmiiDeploymentFromUpdateLog] - [%s] no this update log ! use update instead ! => ", updateLog) // return false //} @@ -734,6 +736,7 @@ func FilterAllCmiiNodeSoft(nodeList []CmiiNodeInterface) (result []CmiiNodeInter return result } +// AppNameBelongsToCmiiImage 根据CmiiBackendAppMap CmiiFrontendAppMap判断一个appName是否属于CMII func AppNameBelongsToCmiiImage(appName string) bool { _, ok := d_app.CmiiBackendAppMap[appName] if !ok { diff --git a/agent-operator/CmiiK8sOperator_test.go b/agent-operator/CmiiK8sOperator_test.go index 3846994..48fa096 100755 --- a/agent-operator/CmiiK8sOperator_test.go +++ b/agent-operator/CmiiK8sOperator_test.go @@ -141,78 +141,78 @@ func TestBackupAllCmiiDeploymentToMap(t *testing.T) { } func TestBackupAllCmiiDeploymentToList(t *testing.T) { - allCmiiImageList := BackupAllCmiiDeploymentToList(demo, false) + allCmiiImageList := BackupAllCmiiDeploymentToList(demo, true) utils.BeautifulPrint(allCmiiImageList) } +// Update DEMO by Tag Update func TestUpdateCmiiImageTagFromNameTagMap(t *testing.T) { cmii530BackendMap := map[string]string{ - "cmii-admin-data": "5.3.0", - "cmii-admin-gateway": "5.3.0", - "cmii-admin-user": "5.3.0", - "cmii-open-gateway": "5.3.0", - "cmii-suav-supervision": "5.3.0", - "cmii-uav-airspace": "5.3.0", - "cmii-uav-alarm": "5.3.0", - "cmii-uav-brain": "5.3.0", - "cmii-uav-cloud-live": "5.3.0", - "cmii-uav-cms": "5.3.0", - "cmii-uav-data-post-process": "5.3.0", - "cmii-uav-developer": "5.3.0", - "cmii-uav-device": "5.3.0", - "cmii-uav-emergency": "5.3.0", - "cmii-uav-gateway": "5.3.0", - "cmii-uav-gis-server": "5.3.0", - "cmii-uav-industrial-portfolio": "5.3.0", - "cmii-uav-integration": "5.3.0", - "cmii-uav-logger": "5.3.0", - "cmii-uav-material-warehouse": "5.3.0", - "cmii-uav-mission": "5.3.0", - "cmii-uav-mqtthandler": "5.3.0", - "cmii-uav-notice": "5.3.0", - "cmii-uav-oauth": "5.3.0", - "cmii-uav-process": "5.3.0", - "cmii-uav-surveillance": "5.3.0", - "cmii-uav-threedsimulation": "5.3.0", - 
"cmii-uav-tower": "5.3.0", - "cmii-uav-user": "5.3.0", - "cmii-uav-waypoint": "5.3.0", - //"cmii-uav-grid-datasource": "5.2.0-24810", - //"cmii-uav-grid-engine": "5.1.0", - //"cmii-uav-grid-manage": "5.1.0", + "cmii-admin-data": "5.7.0", + "cmii-admin-gateway": "5.7.0", + "cmii-admin-user": "5.7.0", + "cmii-open-gateway": "5.7.0", + "cmii-suav-supervision": "5.7.0", + "cmii-uav-airspace": "5.7.0", + "cmii-uav-alarm": "5.7.0", + "cmii-uav-brain": "5.7.0", + "cmii-uav-cloud-live": "5.7.0", + "cmii-uav-cms": "5.7.0", + "cmii-uav-data-post-process": "5.7.0", + "cmii-uav-developer": "5.7.0", + "cmii-uav-device": "5.7.0", + "cmii-uav-emergency": "5.7.0", + "cmii-uav-gateway": "5.7.0", + "cmii-uav-gis-server": "5.7.0", + "cmii-uav-industrial-portfolio": "5.7.0", + "cmii-uav-integration": "5.7.0", + "cmii-uav-logger": "5.7.0", + "cmii-uav-material-warehouse": "5.7.0", + "cmii-uav-mission": "5.7.0", + "cmii-uav-mqtthandler": "5.7.0", + "cmii-uav-notice": "5.7.0", + "cmii-uav-oauth": "5.7.0", + "cmii-uav-process": "5.7.0", + "cmii-uav-surveillance": "5.7.0", + "cmii-uav-threedsimulation": "5.7.0", + "cmii-uav-tower": "5.7.0", + "cmii-uav-user": "5.7.0", + "cmii-uav-waypoint": "5.7.0", + "cmii-uav-sense-adapter": "5.7.0", + "cmii-uav-multilink": "5.7.0", } cmii530FrontendMap := map[string]string{ - "cmii-suav-platform-supervision": "5.3.0", - "cmii-suav-platform-supervisionh5": "5.3.0", - "cmii-uav-platform": "5.3.0", - "cmii-uav-platform-ai-brain": "5.3.0", - "cmii-uav-platform-armypeople": "5.3.0", - "cmii-uav-platform-base": "5.3.0", - "cmii-uav-platform-cms-portal": "5.3.0", - "cmii-uav-platform-detection": "5.3.0", - "cmii-uav-platform-emergency-rescue": "5.3.0", - "cmii-uav-platform-logistics": "5.3.0", - "cmii-uav-platform-media": "5.3.0", - "cmii-uav-platform-multiterminal": "5.3.0", - "cmii-uav-platform-mws": "5.3.0", - "cmii-uav-platform-oms": "5.3.0", - "cmii-uav-platform-open": "5.3.0", - "cmii-uav-platform-securityh5": "5.3.0", - "cmii-uav-platform-seniclive": 
"5.3.0", - "cmii-uav-platform-share": "5.3.0", - "cmii-uav-platform-splice": "5.3.0", - "cmii-uav-platform-threedsimulation": "5.3.0", - "cmii-uav-platform-visualization": "5.3.0", + "cmii-suav-platform-supervision": "5.7.0", + "cmii-suav-platform-supervisionh5": "5.7.0", + "cmii-uav-platform": "5.7.0", + "cmii-uav-platform-ai-brain": "5.7.0", + "cmii-uav-platform-armypeople": "5.7.0", + //"cmii-uav-platform-base": "5.7.0", + "cmii-uav-platform-cms-portal": "5.7.0", + //"cmii-uav-platform-detection": "5.7.0", + //"cmii-uav-platform-emergency-rescue": "5.7.0", + //"cmii-uav-platform-logistics": "5.7.0", + "cmii-uav-platform-media": "5.7.0", + //"cmii-uav-platform-multiterminal": "5.7.0", + "cmii-uav-platform-mws": "5.7.0", + "cmii-uav-platform-oms": "5.7.0", + "cmii-uav-platform-open": "5.7.0", + "cmii-uav-platform-securityh5": "5.7.0", + //"cmii-uav-platform-seniclive": "5.7.0", + "cmii-uav-platform-share": "5.7.0", + //"cmii-uav-platform-splice": "5.7.0", + //"cmii-uav-platform-threedsimulation": "5.7.0", + //"cmii-uav-platform-visualization": "5.7.0", //"cmii-uav-platform-security": "4.1.6", } - result := UpdateCmiiImageTagFromNameTagMap("demo", cmii530BackendMap) + result := UpdateCmiiImageTagFromNameTagMap(demo, cmii530BackendMap) utils.BeautifulPrint(result) - result = UpdateCmiiImageTagFromNameTagMap("demo", cmii530FrontendMap) + result = UpdateCmiiImageTagFromNameTagMap(demo, cmii530FrontendMap) utils.BeautifulPrint(result) } @@ -253,7 +253,7 @@ func TestUpdateCmiiDeploymentImageTag(t *testing.T) { // 计算20:00的时间 now := time.Now() - targetTime := time.Date(now.Year(), now.Month(), now.Day(), 17, 05, 00, 0, now.Location()) + targetTime := time.Date(now.Year(), now.Month(), now.Day(), 12, 50, 00, 0, now.Location()) duration := time.Duration(0) @@ -277,10 +277,11 @@ func TestUpdateCmiiDeploymentImageTag(t *testing.T) { appNameTagMap := map[string]string{ //"cmii-uav-platform-dispatchh5": "5.6.0-062401", //"cmii-uav-data-post-process": "5.6.0-062401", - 
"cmii-uav-industrial-portfolio": "5.6.0-071601", - //"cmii-uav-multilink": "5.5.0", - //"cmii-uav-developer": "5.6.0-062701", - //"cmii-uav-platform": "5.6.0-0626", + //"cmii-uav-industrial-portfolio": "5.6.0-071801", + "cmii-uav-industrial-portfolio": "5.7.0-31369-yunnan-082102", + //"cmii-uav-brain": "5.5.0", + //"cmii-uav-platform": "5.6.0-071702", + //"cmii-uas-lifecycle": "5.6.0-30403-071802", } for appName, newTag := range appNameTagMap { diff --git a/agent-operator/CmiiMinioOperator.go b/agent-operator/CmiiMinioOperator.go index 5da10c2..8d2f002 100755 --- a/agent-operator/CmiiMinioOperator.go +++ b/agent-operator/CmiiMinioOperator.go @@ -186,6 +186,8 @@ func (op *MinioOperator) UploadFile(bucketNameWithSuffix, filePath, fileName str filePath += string(separator) } + fileName = strings.TrimPrefix(fileName, "/") + bucketNameWithSuffix = strings.TrimPrefix(bucketNameWithSuffix, "/") oldBucketName := bucketNameWithSuffix realFileName := fileName @@ -205,6 +207,6 @@ func (op *MinioOperator) UploadFile(bucketNameWithSuffix, filePath, fileName str return false } - log.InfoF("[UploadFile] - uploaded %s of size %d", filePath+fileName, n) + log.InfoF("[UploadFile] - uploaded [%s] of size %d", filePath+realFileName, n) return true } diff --git a/agent-operator/CmiiOperator.go b/agent-operator/CmiiOperator.go deleted file mode 100755 index 5f38c2f..0000000 --- a/agent-operator/CmiiOperator.go +++ /dev/null @@ -1,512 +0,0 @@ -package main - -import ( - "errors" - "os" - "slices" - "strings" - image2 "wdd.io/agent-common/image" - "wdd.io/agent-common/utils" - "wdd.io/agent-deploy/d_app" - "wdd.io/agent-operator/image" -) - -const OfflineDeployHarborHost = "harbor.wdd.io" -const PublicDeployHarborHost = "42.192.52.227" -const DirectPushDeployHarborHost = "chongqingcis-9b4a3da9.ecis.chongqing-1.cmecloud.cn" - -type ImageSyncEntity struct { - ProjectName string // 优先级3 优先级最低 从DEMO拉取镜像 - ProjectVersion string // 优先级2 高于ProjectName 优先拉取特定版本的镜像 - - CmiiNameTagList []string // 
优先级1 appName:tag 会被转换为FullNameImageList - FullNameImageList []string // 优先级1 优先下载此类型 - - ShouldDownloadImage bool // 下载镜像 DCU中的D - ShouldCompressImageToGzip bool // 压缩镜像 DCU中的C - ShouldUploadToDemoMinio bool // 上传镜像 DCU中的U - - ShouldDownloadFromOss bool // 下载镜像 DLTU中的D - ShouldUpdateImageTag bool // 更新镜像 DLTU中的U - - ShouldDirectPushToHarbor bool // 直接推送到对方的主机 || 离线部署机 - DirectHarborHost string // IP:Port or 域名:PORT 不带http前缀 -} - -type ImageSyncResult struct { - ErrorPullImageList []string - ErrorGzipImageList []string - ErrorPushImageNameList []string - RealImageNameList []string - RealGzipFileNameList []string - AllCmiiImageNameList []string -} - -// PullFromEntityAndSyncConditionally 根据ImageSyncEntity拉取特定的镜像,然后上传到特定的目标机器(或者上传的minio中) -func (sync ImageSyncEntity) PullFromEntityAndSyncConditionally() (imageSyncResult ImageSyncResult) { - - var realCmiiImageList []string - var allCmiiImageNameList []string - - var errorPullImageList []string - var allGzipFileNameList []string - var errorGzipImageList []string - var errorPushImageNameList []string - var gzipFolderFullPath string - - if (sync.CmiiNameTagList == nil && sync.FullNameImageList == nil) || (len(sync.CmiiNameTagList) == 0 && len(sync.FullNameImageList) == 0) { - // 没有指定特定的镜像,那么根据 ProjectVersion 或者从DEMO拉取镜像 - // pull images - // compress - if sync.ProjectVersion != "" { - - // 获取特定版本的镜像 - errorPullImageList, errorGzipImageList, allCmiiImageNameList, allGzipFileNameList = C_DownloadCompressUploadFromVersion(sync.ProjectVersion, sync.ShouldCompressImageToGzip, sync.ShouldUploadToDemoMinio) - - gzipFolderFullPath = image.OfflineImageGzipFolderPrefix + sync.ProjectVersion - - } else { - - // 获取DEMO的镜像 - errorPullImageList, errorGzipImageList, allCmiiImageNameList, allGzipFileNameList = C_DownloadCompressUploadFromDemo(sync.ProjectName, sync.ShouldCompressImageToGzip, sync.ShouldUploadToDemoMinio) - gzipFolderFullPath = image.OfflineImageGzipFolderPrefix + sync.ProjectName - - } - } else { - // 拉取特定的镜像 - - 
gzipFolderFullPath = image.OfflineImageGzipFolderPrefix + "tmp" - // 组装镜像名称 - allCmiiImageNameList = concatAndUniformCmiiImage(sync.FullNameImageList, sync.CmiiNameTagList) - - // DCU - errorPullImageList, errorGzipImageList, realCmiiImageList, allGzipFileNameList = A_DownloadCompressUpload(true, allCmiiImageNameList, sync.ShouldCompressImageToGzip, gzipFolderFullPath, sync.ShouldUploadToDemoMinio) - } - - // 直接传输到目标Harbor仓库 - if sync.ShouldDirectPushToHarbor { - if sync.DirectHarborHost == "" { - log.ErrorF("DirectHarborHost is null ! can't push to target harbor !") - } - // push to - errorPushImageNameList = image.TagFromListAndPushToCHarbor(allCmiiImageNameList, sync.DirectHarborHost) - } - - // build result - imageSyncResult.AllCmiiImageNameList = allCmiiImageNameList - - imageSyncResult.RealImageNameList = realCmiiImageList - imageSyncResult.ErrorPullImageList = errorPullImageList - - imageSyncResult.RealGzipFileNameList = allGzipFileNameList - imageSyncResult.ErrorGzipImageList = errorGzipImageList - - imageSyncResult.ErrorPushImageNameList = errorPushImageNameList - - return imageSyncResult -} - -func concatAndUniformCmiiImage(fullImageList []string, cmiiImageList []string) []string { - - if cmiiImageList != nil || len(cmiiImageList) > 0 { - // cmiiImageList has content - if fullImageList == nil { - fullImageList = []string{} - } - - for _, cmiiImage := range cmiiImageList { - fullImageList = append(fullImageList, image2.CmiiHarborPrefix+cmiiImage) - } - } - - return fullImageList -} - -// A_DownloadCompressUpload DCU 镜像同步的前半部分,通常在35.71 LapPro执行,无需Bastion Mode -func A_DownloadCompressUpload(downloadImage bool, fullNameList []string, shouldGzip bool, gzipFolderFullPath string, shouldOss bool) (errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileFullNameList []string) { - - // write to file - localGzipFileListTxt := gzipFolderFullPath + "all-gzip-image-file-name.txt" - - // Download - log.Info("DOWNLOAD START !") - if downloadImage { - if 
fullNameList == nil || len(fullNameList) == 0 { - log.InfoF("no image name list !") - } else { - errorPullImageList = image.PullFromFullNameList(fullNameList) - } - } - - // remove failed - fullNameList = slices.DeleteFunc(fullNameList, func(imageName string) bool { - return slices.Contains(errorPullImageList, imageName) - }) - - // Compress - if shouldGzip { - - // remove file - _ = os.Remove(localGzipFileListTxt) - - gzipFileAlready := make(map[string]bool) - if utils.FileOrFolderExists(gzipFolderFullPath) { - dir, _ := os.ReadDir(gzipFolderFullPath) - for _, entry := range dir { - if entry.IsDir() { - continue - } - gzipFileAlready[strings.TrimPrefix(entry.Name(), gzipFolderFullPath)] = true - } - } - - // mkdir folder - err := os.MkdirAll(gzipFolderFullPath, os.ModeDir) - if err != nil { - if !errors.Is(err, os.ErrExist) { - log.ErrorF("create folder error of %s", gzipFolderFullPath) - panic(err) - } - } - - // 循环遍历压缩 - log.Info("COMPRESS START") - - for _, imageFullName := range fullNameList { - - // gzip image file already exists - - gzipFileName := image2.ImageFullNameToGzipFileName(imageFullName) - gzipImageFileFullPath := gzipFolderFullPath + gzipFileName - _, ok := gzipFileAlready[gzipFileName] - if len(gzipFileAlready) > 0 && ok { - log.DebugF("gzip file %s already exists !", gzipFileName) - } else { - ok, gzipImageFileFullPath = image.SaveToGzipFile(imageFullName, gzipFolderFullPath) - if !ok { - errorGzipImageList = append(errorGzipImageList, imageFullName) - continue - } - } - - // 压缩成功 - allGzipFileFullNameList = append(allGzipFileFullNameList, gzipImageFileFullPath) - } - // remove failed - fullNameList = slices.DeleteFunc(fullNameList, func(imageName string) bool { - return slices.Contains(errorGzipImageList, imageName) - }) - - for _, gzipFileFullName := range allGzipFileFullNameList { - utils.AppendContentToFile( - strings.TrimPrefix(gzipFileFullName, gzipFolderFullPath)+"\n", - localGzipFileListTxt, - ) - } - - log.InfoF("all gzip file name list 
is %s", localGzipFileListTxt) - } - - // Upload - if shouldOss { - //uploadGzipFileToDemoMinio() - // get gzip file name list - log.Info("UPLOAD OSS START !") - - // start to upload - // extract demo oss location suffix from gzipFolderFullPath - trimPrefix := strings.TrimPrefix(gzipFolderFullPath, image.OfflineImageGzipFolderPrefix) - bucketNameWithPrefix := "cmlc-installation/" + trimPrefix - log.InfoF("gzip file location in demo oss is %s", DefaultDemoEndpoint+"/"+bucketNameWithPrefix) - - // upload gzip file list txt to demo - if !DefaultCmiiMinioOperator.UploadToDemo(bucketNameWithPrefix, gzipFolderFullPath, strings.TrimPrefix(localGzipFileListTxt, gzipFolderFullPath)) { - log.ErrorF("upload of %s to demo oss error !", localGzipFileListTxt) - } - - log.InfoF("upload all gzip file to demo oss !") - for _, gzipFileFullName := range allGzipFileFullNameList { - // SaveToGzipFile 返回的是全路径 归一化处理 gzip file name - gzipFileFullName = strings.TrimPrefix(gzipFileFullName, gzipFolderFullPath) - if !DefaultCmiiMinioOperator.UploadToDemo(bucketNameWithPrefix, gzipFolderFullPath, gzipFileFullName) { - log.ErrorF("upload of %s to demo oss error !", gzipFileFullName) - } - } - - } - - return errorPullImageList, errorGzipImageList, fullNameList, allGzipFileFullNameList -} - -// A_DownloadLoadTagUpload DLTU procedure ImageSync的另外一般流程,需要支持 堡垒机(纯离线)的模式 -// 2. Gzip文件目录,RKE MIDDLE CMII三个文件目录 - 约定目录 -// 约定目录 /root/wdd/image/rke/ /root/wdd/image/middle/ /root/wdd/image/cmii/ -// 3. 读取本机的IP地址 - 参数传递 -// 4. OSS地址 - ossUrlPrefix传空 则使用默认值 -// 5. 
ossFileName - 如果结尾为txt,则为文件的形式,如果为tar.gz,则为gzip文件夹的形式 -func A_DownloadLoadTagUpload(downloadFromOss bool, ossUrlPrefix, ossFileNameOrGzipFileListTxt, localGzipFolderOrGzipFile string, targetHarborFullName string) (targetImageFullNameList []string) { - - // 支持单文件的形式 - if !utils.IsDirOrFile(localGzipFolderOrGzipFile) { - // 单个压缩文件 肯定是离线的形式 - if !strings.HasSuffix(localGzipFolderOrGzipFile, ".tar.gz") { - log.ErrorF("local gzip file %s is not a .tar.gz file !", localGzipFolderOrGzipFile) - return nil - } - - // load - image.LoadFromGzipFilePath(localGzipFolderOrGzipFile) - } else { - separator := os.PathSeparator - if !strings.HasSuffix(localGzipFolderOrGzipFile, string(separator)) { - localGzipFolderOrGzipFile += string(separator) - } - - // download - if downloadFromOss { - if !parseAndDownloadFromOss(ossUrlPrefix, ossFileNameOrGzipFileListTxt, localGzipFolderOrGzipFile) { - log.ErrorF("download from oss error !") - return nil - } - } - - // load - loadAllGzipImageFromLocalFolder(localGzipFolderOrGzipFile) - } - - // tag - // push - allFileInFolder, err := utils.ListAllFileInFolder(localGzipFolderOrGzipFile) - if err != nil { - return nil - } - for _, gzipFileName := range allFileInFolder { - // 过滤非.tar.gz结尾的文件 - if !strings.HasSuffix(gzipFileName, ".tar.gz") { - continue - } - - log.DebugF("gzip file name is %s", gzipFileName) - - // gzip to image full name 拿到镜像的原始名称 - imageFullName := image2.GzipFileNameToImageFullName(gzipFileName) - if imageFullName == "" { - log.ErrorF("gzip file %s to image full name error !", gzipFileName) - continue - } - - // tag 拿到目标名称 然后重新Tag - targetImageFullName := image2.ImageNameToTargetImageFullName(imageFullName, targetHarborFullName) - image.TagFromSourceToTarget(imageFullName, targetImageFullName) - - // uploadToHarbor 上传到目标Harbor - if image.UploadToHarbor(targetImageFullName) { - targetImageFullNameList = append(targetImageFullNameList, targetImageFullName) - } else { - log.ErrorF("upload to harbor error of %s", 
targetImageFullName) - } - } - - return targetImageFullNameList -} - -func loadAllGzipImageFromLocalFolder(localGzipFolder string) { - image.LoadFromFolderPath(localGzipFolder) -} - -func parseAndDownloadFromOss(ossUrlPrefix, ossFileName, localGzipFolder string) bool { - - if ossUrlPrefix == "" { - ossUrlPrefix = DefaultOssUrlPrefix - } - if !strings.HasSuffix(ossUrlPrefix, "/") { - ossUrlPrefix += "/" - } - - log.InfoF("prepare to download from %s%s", ossUrlPrefix, ossFileName) - - if !DefaultCmiiMinioOperator.DemoMinioOperator.DownloadFileFromOssFullUrl(ossUrlPrefix+ossFileName, localGzipFolder) { - log.ErrorF("download %s from oss error !", ossUrlPrefix+ossFileName) - return false - } - - if strings.HasSuffix(ossFileName, ".txt") { - // download from gzip file list txt - // download all files in the txt file - result := utils.ReadAllContentFromFile(localGzipFolder + ossFileName) - for _, gzipFileName := range result { - DefaultCmiiMinioOperator.DemoMinioOperator.DownloadFileFromOssFullUrl(ossUrlPrefix+gzipFileName, localGzipFolder) - } - } - - // 解析 - return true -} - -// C_DownloadCompressUploadFromDemo 获取DEMO环境的全部镜像 -func C_DownloadCompressUploadFromDemo(projectName string, shouldGzip, shouldOss bool) (errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList []string) { - - // generate a project folder - err := os.MkdirAll(image.OfflineImageGzipFolderPrefix+projectName, os.ModeDir) - if err != nil { - if !errors.Is(err, os.ErrExist) { - log.ErrorF("[Download_Compress_Upload_From_Demo] - create folder of %s error %s", image.OfflineImageGzipFolderPrefix+projectName, err.Error()) - return errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList - } - } - - // get demo image version map - allCmiiImageNameListFromDemo := buildAllCmiiImageNameListFromDemo(projectName) - - // do work - // DCU - return A_DownloadCompressUpload(true, allCmiiImageNameListFromDemo, shouldGzip, image.OfflineImageGzipFolderPrefix+projectName, 
shouldOss) -} - -func buildAllCmiiImageNameListFromDemo(projectName string) []string { - - var realCmiiImageName []string - - backendMap, frontendMap, srsMap := BackupAllCmiiDeploymentToMap(demo) - - // save map to file - backendMapFile := image.OfflineImageGzipFolderPrefix + projectName + "-backend-app.json" - frontendMapFile := image.OfflineImageGzipFolderPrefix + projectName + "-frontend-app.json" - srsMapFile := image.OfflineImageGzipFolderPrefix + projectName + "-srs-app.json" - _ = os.Remove(backendMapFile) - _ = os.Remove(frontendMapFile) - _ = os.Remove(srsMapFile) - - //utils.AppendContentToFile( - // utils.BeautifulPrintToString(backendMap), - // backendMapFile, - //) - //utils.AppendContentToFile( - // utils.BeautifulPrintToString(frontendMap), - // frontendMapFile, - //) - //utils.AppendContentToFile( - // utils.BeautifulPrintToString(srsMapFile), - // srsMapFile, - //) - - realCmiiImageName = append(realCmiiImageName, image.CmiiImageMapToFullNameList(backendMap)...) - realCmiiImageName = append(realCmiiImageName, image.CmiiImageMapToFullNameList(frontendMap)...) - realCmiiImageName = append(realCmiiImageName, image.CmiiImageMapToFullNameList(srsMap)...) 
- - utils.BeautifulPrintListWithTitle(realCmiiImageName, "Cmii Project Image => "+projectName) - - return realCmiiImageName -} - -// C_DownloadCompressUploadFromVersion 根据版本下载全部的CMII镜像 -func C_DownloadCompressUploadFromVersion(cmiiVersion string, shouldGzip bool, shouldOss bool) (errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList []string) { - - // generate a project folder - err := os.MkdirAll(image.OfflineImageGzipFolderPrefix+cmiiVersion, os.ModeDir) - if err != nil { - if !errors.Is(err, os.ErrExist) { - log.ErrorF("[Download_Compress_Upload_From_Demo] - create folder of %s error %s", image.OfflineImageGzipFolderPrefix+cmiiVersion, err.Error()) - return errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList - } - } - - // build all cmii image name list - realCmiiImageName = buildAllCmiiImageNameListFromVersion(cmiiVersion) - - // do work - // DCU procedure - return A_DownloadCompressUpload(true, realCmiiImageName, shouldGzip, image.OfflineImageGzipFolderPrefix+cmiiVersion, shouldOss) - -} - -func buildAllCmiiImageNameListFromVersion(cmiiVersion string) []string { - - var realCmiiImageName []string - - backendMap := d_app.CmiiBackendAppMap - frontendMap := d_app.CmiiFrontendAppMap - - for app := range backendMap { - backendMap[app] = cmiiVersion - } - for app := range frontendMap { - frontendMap[app] = cmiiVersion - } - - realCmiiImageName = append(realCmiiImageName, image.CmiiImageMapToFullNameList(backendMap)...) - realCmiiImageName = append(realCmiiImageName, image.CmiiImageMapToFullNameList(frontendMap)...) 
- - for key, value := range d_app.CmiiSrsAppMap { - var app *CmiiDeploymentInterface - if strings.Contains(value, "deployment") { - app = DefaultCmiiOperator.DeploymentOneInterface(demo, key) - if app != nil { - realCmiiImageName = append(realCmiiImageName, app.Image) - } - } else if strings.Contains(value, "state") { - app = DefaultCmiiOperator.StatefulSetOneInterface(demo, key) - if app != nil { - for _, imageName := range app.ContainerImageMap { - realCmiiImageName = append(realCmiiImageName, imageName) - } - } - } - } - utils.BeautifulPrintListWithTitle(realCmiiImageName, "Cmii Version Image => "+cmiiVersion) - return realCmiiImageName -} - -// C_DownloadCompressUploadDependency DCU所有的依赖镜像 -func C_DownloadCompressUploadDependency(shouldGzip bool, shouldOss bool, shouldDownload bool, isRKE bool) (errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList []string) { - - log.Info("DCU for middle and rke!") - err := os.MkdirAll(image.OfflineImageGzipFolderPrefix, os.ModeDir) - if err != nil { - if !errors.Is(err, os.ErrExist) { - log.ErrorF("[FetchDependencyRepos] - create folder of %s error %s", image.OfflineImageGzipFolderPrefix, err.Error()) - } - } - - var fulleImageNameList []string - var gzipFolderPrefix string - - if isRKE { - log.Info("DCU for rke!") - fulleImageNameList = d_app.Rancher1204Amd64 - gzipFolderPrefix = image.OfflineImageGzipFolderPrefix + "rke/" - } else { - log.Info("DCU for middle!") - - fulleImageNameList = d_app.MiddlewareAmd64 - gzipFolderPrefix = image.OfflineImageGzipFolderPrefix + "middle/" - } - - return A_DownloadCompressUpload(shouldDownload, fulleImageNameList, shouldGzip, gzipFolderPrefix, shouldOss) -} - -func LoadSplitCmiiGzipImageToTargetHarbor(projectName, targetHarborHost string) (errorLoadImageNameList, errorPushImageNameList []string) { - - // list folder - projectGzipFolder := image.OfflineImageGzipFolderPrefix + projectName - errorLoadImageNameList = append(errorLoadImageNameList, 
image.LoadFromFolderPath(projectGzipFolder)...) - // read from json - errorPushImageNameList = append(errorPushImageNameList, image.TagFromListAndPushToCHarbor(d_app.Cmii520DemoImageList, targetHarborHost)...) - - // re-tag - // push - - // todo clean host and harbor - // check harbor exits - - return errorLoadImageNameList, errorPushImageNameList -} - -func LoadSplitDepGzipImageToTargetHarbor(targetHarborHost string) (errorLoadImageNameList []string, errorPushImageNameList []string) { - - errorPushImageNameList = append(errorPushImageNameList, image.TagFromListAndPushToCHarbor(d_app.MiddlewareAmd64, targetHarborHost)...) - //errorPushImageNameList = append(errorPushImageNameList, image.TagFromListAndPushToCHarbor(d_app.Rancher1204Amd64, targetHarborHost)...) - - return errorLoadImageNameList, errorPushImageNameList - -} diff --git a/agent-operator/ImageSyncDLTU.sh b/agent-operator/ImageSyncDLTU.sh new file mode 100644 index 0000000..ed20e2e --- /dev/null +++ b/agent-operator/ImageSyncDLTU.sh @@ -0,0 +1,98 @@ +#!/bin/bash + +all_image_list_txt="all-cmii-image-list.txt" # 需要修改版本 +gzip_image_list_txt="all-gzip-image-list.txt" # 一般不需要修改 +oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation" +local_gzip_path="/root/wdd/octopus_image_tmp" +DockerRegisterDomain="10.250.0.100:8033" # 需要根据实际修改 +HarborAdminPass=V2ryStr@ngPss # 需要跟第一脚本中的密码保持一致 + +print_green() { + echo -e "\033[32m${1}\033[0m" + echo "" +} + +print_red() { + echo -e "\033[31m${1}\033[0m" + echo "" +} + +Download_Load_Tag_Upload() { + print_green "[DLTU] - start !" 
+ while [[ $# -gt 0 ]]; do + case "$1" in + rke) + # print_green "download rke " + local_gzip_path="$local_gzip_path/rke" + mkdir -p ${local_gzip_path} + oss_prefix_url="$oss_prefix_url/rke/" + dltu + shift # past argument + ;; + middle) + local_gzip_path="$local_gzip_path/middle" + mkdir -p $local_gzip_path + oss_prefix_url="$oss_prefix_url/middle/" + dltu + shift # past argument + ;; + cmii) + local_gzip_path="$local_gzip_path/cmii" + mkdir -p $local_gzip_path + oss_prefix_url="$oss_prefix_url/cmii/" + dltu + shift # past argument + ;; + *) + # unknown option + print_red "bad arguments" + ;; + esac + shift # past argument or value + done + +} + +dltu() { + print_green "download all image name list and gzip file list!" + cd $local_gzip_path || exit + wget "$oss_prefix_url$all_image_list_txt" + wget "$oss_prefix_url$gzip_image_list_txt" + + docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain} + echo "" + while IFS= read -r i; do + [ -z "${i}" ] && continue + echo "download gzip file =>: $oss_prefix_url${i}" + if wget "$oss_prefix_url${i}" >/dev/null 2>&1; then + echo "Gzip file download success : ${i}" + image_full_name=$(docker load -i ${i} | awk -F': ' '{print $2}') + + app_name=$(echo "$image_full_name" | sed 's|.*/||g') + echo "extract short name is $app_name" + + if echo $image_full_name | grep -q "rancher" + then + print_green "tag image to => $DockerRegisterDomain/rancher/$app_name" + docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name + docker push $DockerRegisterDomain/rancher/$app_name + else + echo "" + fi + + else + print_red "Gzip file download FAILED : ${i}" + fi + echo "-------------------------------------------------" + done <"${gzip_image_list_txt}" + shift + +} + +test(){ + app_name=$(echo "nginx:latest" | sed 's|.*/||g') + echo "extract short name is $app_name" +} + +# test +Download_Load_Tag_Upload "rke" diff --git a/agent-operator/ImageSyncOperator.go b/agent-operator/ImageSyncOperator.go new file mode 100755 
index 0000000..be17ab0 --- /dev/null +++ b/agent-operator/ImageSyncOperator.go @@ -0,0 +1,704 @@ +package main + +import ( + "errors" + "os" + "path/filepath" + "slices" + "strings" + image2 "wdd.io/agent-common/image" + "wdd.io/agent-common/utils" + "wdd.io/agent-deploy/d_app" + "wdd.io/agent-operator/image" +) + +const ( + OfflineDeployHarborHost = "harbor.wdd.io" + PublicDeployHarborHost = "42.192.52.227" + DirectPushDeployHarborHost = "chongqingcis-9b4a3da9.ecis.chongqing-1.cmecloud.cn" + + AllCmiiImageListLocalFileName = "all-cmii-image-list.txt" + AllGzipImageLocalFileName = "all-gzip-image-list.txt" +) + +type ImageSyncEntity struct { + DownloadCondition *DownloadEntity // D的条件 + CompressCondition *CompressEntity // C的条件 + UploadCondition *UploadEntity // U的条件 + + ShouldDownloadFromOss bool // 下载镜像 DLTU中的D + ShouldUpdateImageTag bool // 更新镜像 DLTU中的U + + ShouldDirectPushToHarbor bool // 直接推送到对方的主机 || 离线部署机 + DirectHarborHost string // IP:Port or 域名:PORT 不带http前缀 +} + +// DownloadEntity DCU中的D的条件 +type DownloadEntity struct { + ShouldDownloadImage bool // 下载镜像 DCU中的D 实际无用 + + ProjectName string // 优先级3 优先级最低 从DEMO拉取镜像 + ProjectVersion string // 优先级2 高于ProjectName 优先拉取特定版本的镜像 + + CmiiNameTagList []string // 优先级1 appName:tag 会被转换为FullNameImageList + FullNameImageList []string // 优先级1 优先下载此类型 + + DownloadAuthUserName string // 下载需要认证的用户名 + DownloadAuthPassword string // 下载需要认证的密码 +} + +// CompressEntity DCU中的C的条件 +type CompressEntity struct { + ShouldCompressImageToGzip bool // 压缩镜像 DCU中的C + ShouldGzipSplit bool // 压缩镜像 是否应该分割存储 true=独立存储 false=整个存储 + GzipLocalFolder string // 压缩镜像 保存压缩镜像文件的本地目录 +} + +// UploadEntity DCU中的U的条件 +type UploadEntity struct { + ShouldUploadToDemoMinio bool // 上传镜像 DCU中的U +} + +type ImageSyncResult struct { + ProcedureSuccessImageList []string // 经过特定步骤之后成功的镜像 + + DownloadResult *DownloadResultEntity + CompressResult *CompressResultEntity + UploadResult *UploadResultEntity +} + +type DownloadResultEntity struct { + ErrorPullImageList 
[]string // 下载镜像 DCU中的D 下载失败的镜像 + SuccessPullImageList []string // 下载镜像 DCU中的D 下载成功的镜像 + SuccessPullTxtFileLocalFullPath string // 下载镜像 DCU中的D 下载成功的镜像保存的文件地址 GzipLocalFolder + all +} + +type CompressResultEntity struct { + ErrorGzipImageList []string // 压缩镜像 DCU中的C 压缩失败的镜像 + SuccessGzipImageList []string // 压缩镜像 DCU中的C 压缩成功的镜像 + GzipTxtFileLocalFullPath string // 压缩镜像 DCU中的C 压缩镜像保存的目录 +} + +type UploadResultEntity struct { + ErrorUploadImageList []string // 上传镜像 DCU中的U 上传失败的镜像 + AllDownloadUrl []string // 上传镜像 DCU中的U 正式的下载地址列表i +} + +// PullFromEntityAndSyncConditionally 根据ImageSyncEntity拉取特定的镜像,然后上传到特定的目标机器(或者上传的minio中) +func (syncCondition *ImageSyncEntity) PullFromEntityAndSyncConditionally() (imageSyncResult *ImageSyncResult) { + + imageSyncResult = &ImageSyncResult{ + ProcedureSuccessImageList: nil, + DownloadResult: &DownloadResultEntity{ + ErrorPullImageList: nil, + SuccessPullImageList: nil, + SuccessPullTxtFileLocalFullPath: "", + }, + CompressResult: &CompressResultEntity{ + ErrorGzipImageList: nil, + SuccessGzipImageList: nil, + GzipTxtFileLocalFullPath: "", + }, + UploadResult: &UploadResultEntity{ + ErrorUploadImageList: nil, + AllDownloadUrl: nil, + }, + } + + if (syncCondition.DownloadCondition.CmiiNameTagList == nil && syncCondition.DownloadCondition.FullNameImageList == nil) || (len(syncCondition.DownloadCondition.CmiiNameTagList) == 0 && len(syncCondition.DownloadCondition.FullNameImageList) == 0) { + // 没有指定特定的镜像,那么根据 ProjectVersion 或者从DEMO拉取镜像 + // pull images + // compress + if syncCondition.DownloadCondition.ProjectVersion != "" { + + // 获取特定版本的镜像 + C_DownloadCompressUploadFromVersion(syncCondition, imageSyncResult) + + } else { + // 获取DEMO的镜像 + C_DownloadCompressUploadFromDemo(syncCondition, imageSyncResult) + } + } else { + // 根据列表拉取镜像 + + // 组装镜像名称 + syncCondition.DownloadCondition.FullNameImageList = concatAndUniformCmiiImage(syncCondition.DownloadCondition.FullNameImageList, syncCondition.DownloadCondition.CmiiNameTagList) + + // gzip 
file folder path + syncCondition.CompressCondition.GzipLocalFolder = filepath.Join(image.OfflineImageGzipFolderPrefix, "tmp") + + // DCU + A_DownloadCompressUpload(syncCondition, imageSyncResult) + } + + // 直接传输到目标Harbor仓库 + if syncCondition.ShouldDirectPushToHarbor { + if syncCondition.DirectHarborHost == "" { + log.ErrorF("DirectHarborHost is null ! can't push to target harbor !") + } + // push to + //errorPushImageNameList = image.TagFromListAndPushToCHarbor(allCmiiImageNameList, syncCondition.DirectHarborHost) + } + + // build result + + return imageSyncResult +} + +func concatAndUniformCmiiImage(fullImageList []string, cmiiImageList []string) []string { + + if cmiiImageList != nil || len(cmiiImageList) > 0 { + // cmiiImageList has content + if fullImageList == nil { + fullImageList = []string{} + } + + for _, cmiiImage := range cmiiImageList { + fullImageList = append(fullImageList, image2.CmiiHarborPrefix+cmiiImage) + } + } + + return fullImageList +} + +// A_DownloadCompressUpload DCU 镜像同步的前半部分,通常在35.71 LapPro执行,无需Bastion Mode +func A_DownloadCompressUpload(syncEntity *ImageSyncEntity, syncResult *ImageSyncResult) { + + // all image full name list need to download + fullNameList := syncEntity.DownloadCondition.FullNameImageList + + // Download + log.Info("[DCU] - DOWNLOAD START !") + if syncEntity.DownloadCondition.ShouldDownloadImage && fullNameList != nil && len(fullNameList) > 0 { + + syncResult.DownloadResult.ErrorPullImageList = image.PullFromFullNameList(fullNameList) + + // remove failed download image from full name list + fullNameList = slices.DeleteFunc(fullNameList, func(imageName string) bool { + return slices.Contains(syncResult.DownloadResult.ErrorPullImageList, imageName) + }) + + } else { + log.Info("[DCU] - No Need To Download !") + } + + syncResult.ProcedureSuccessImageList = fullNameList + + gzipLocalFolderPath := syncEntity.CompressCondition.GzipLocalFolder + + localGzipFileListTxt := filepath.Join(gzipLocalFolderPath, 
AllGzipImageLocalFileName) + + // Compress + if syncEntity.CompressCondition.ShouldCompressImageToGzip { + + // remove file + _ = os.Remove(localGzipFileListTxt) + + // 找到已经存在的压缩文件,跳过 + gzipFileAlready := make(map[string]bool) + if utils.FileOrFolderExists(gzipLocalFolderPath) { + dir, _ := os.ReadDir(gzipLocalFolderPath) + for _, entry := range dir { + if entry.IsDir() { + continue + } + gzipFileAlready[strings.TrimPrefix(entry.Name(), gzipLocalFolderPath)] = true + } + } + + // mkdir folder + err := os.MkdirAll(gzipLocalFolderPath, os.ModeDir) + if err != nil { + if !errors.Is(err, os.ErrExist) { + log.ErrorF("create folder error of %s", gzipLocalFolderPath) + panic(err) + } + } + + // 循环遍历压缩 + log.Info("[DCU] - COMPRESS START") + var errorGzipImageList []string + var allGzipFileFullNameList []string + + if syncEntity.CompressCondition.ShouldGzipSplit { + // 独立压缩 + for _, imageFullName := range fullNameList { + + // gzip image file already exists + gzipFileName := image2.ImageFullNameToGzipFileName(imageFullName) + gzipImageFileFullPath := gzipLocalFolderPath + gzipFileName + _, ok := gzipFileAlready[gzipFileName] + if len(gzipFileAlready) > 0 && ok { + log.DebugF("gzip file %s already exists !", gzipFileName) + } else { + ok, gzipImageFileFullPath = image.SaveToGzipFile(imageFullName, gzipLocalFolderPath) + if !ok { + errorGzipImageList = append(errorGzipImageList, imageFullName) + continue + } + } + + // 压缩成功 + allGzipFileFullNameList = append(allGzipFileFullNameList, gzipImageFileFullPath) + + } + + syncResult.CompressResult.SuccessGzipImageList = allGzipFileFullNameList + syncResult.CompressResult.ErrorGzipImageList = errorGzipImageList + + // remove failed + fullNameList = slices.DeleteFunc(fullNameList, func(imageName string) bool { + return slices.Contains(errorGzipImageList, imageName) + }) + + // write all gzipped file name to file + for _, gzipFileFullName := range allGzipFileFullNameList { + utils.AppendContentToFile( + 
strings.TrimPrefix(strings.TrimPrefix(gzipFileFullName, gzipLocalFolderPath), "/")+"\n", + localGzipFileListTxt, + ) + } + + } else { + // 压缩为一个大的压缩包 + gzipFileName := generateMonolithicGzipFileName(syncEntity) + + ok, gzipFileFullPath, errorGzipImageListTmp := image.SaveImageListToGzipFile(fullNameList, gzipLocalFolderPath, gzipFileName) + if !ok { + panic("[DCU] - gzip error to a monolithic file !") + } + + // write all gzipped file name to file + utils.AppendOverwriteListContentToFile(fullNameList, localGzipFileListTxt) + + // remove failed + fullNameList = slices.DeleteFunc(fullNameList, func(imageName string) bool { + return slices.Contains(errorGzipImageListTmp, imageName) + }) + + syncResult.CompressResult.SuccessGzipImageList = fullNameList + syncResult.CompressResult.ErrorGzipImageList = errorGzipImageListTmp + + log.InfoF("[DCU] - gzip all image from list to monolithic file %s", gzipFileFullPath) + } + + syncResult.CompressResult.GzipTxtFileLocalFullPath = localGzipFileListTxt + + log.InfoF("[DCU] - all gzip file name list is %s", localGzipFileListTxt) + } + + syncResult.ProcedureSuccessImageList = fullNameList + + // Upload + if syncEntity.UploadCondition.ShouldUploadToDemoMinio { + //uploadGzipFileToDemoMinio() + // get gzip file name list + log.Info("[DCU] - UPLOAD OSS START !") + + if !syncEntity.CompressCondition.ShouldCompressImageToGzip { + // 没有压缩指令 直接上传已有的内容 + allFileInGzipFile, err := utils.ListAllFileInFolderWithFullPath(gzipLocalFolderPath) + if err != nil { + log.ErrorF("[DCU] - list all gzip file error !") + return + } + for _, f := range allFileInGzipFile { + if strings.HasSuffix(f, "tar.gz") { + syncResult.CompressResult.SuccessGzipImageList = append(syncResult.CompressResult.SuccessGzipImageList, f) + } + } + + } + + var errorUploadOssGzipNameList []string + var allDownloadUrl []string + + // start to upload + // extract demo oss location suffix from gzipFolderFullPath + // 根据本地保存Gzip的目录路径提取到 相应的后缀 项目代码 + // projectName / projectVersion + 
projectUniqueName := strings.TrimPrefix(gzipLocalFolderPath, image.OfflineImageGzipFolderPrefix) + projectUniqueName = strings.TrimSuffix(projectUniqueName, "/") + + bucketNameWithPrefix := "cmlc-installation/" + projectUniqueName + log.InfoF("gzip file location in demo oss is %s", DefaultDemoEndpoint+"/"+bucketNameWithPrefix) + + // 上传所有的压缩文件名称 + if !DefaultCmiiMinioOperator.UploadToDemo(bucketNameWithPrefix, gzipLocalFolderPath, AllGzipImageLocalFileName) { + log.ErrorF("upload of %s to demo oss error !", AllGzipImageLocalFileName) + } + + // 上传所有的镜像名称 + if !DefaultCmiiMinioOperator.UploadToDemo(bucketNameWithPrefix, gzipLocalFolderPath, AllCmiiImageListLocalFileName) { + log.ErrorF("upload of %s to demo oss error !", AllCmiiImageListLocalFileName) + } + + log.InfoF("upload all gzip file to demo oss !") + for _, gzipFileFullName := range syncResult.CompressResult.SuccessGzipImageList { + // SaveToGzipFile 返回的是全路径 归一化处理 gzip file name + gzipFileName := strings.TrimPrefix(gzipFileFullName, gzipLocalFolderPath) + gzipFileName = strings.TrimPrefix(gzipFileName, "/") + + if !DefaultCmiiMinioOperator.UploadToDemo(bucketNameWithPrefix, gzipLocalFolderPath, gzipFileName) { + log.ErrorF("upload of %s to demo oss error !", gzipFileName) + errorUploadOssGzipNameList = append(errorUploadOssGzipNameList, gzipFileName) + } else { + allDownloadUrl = append(allDownloadUrl, DefaultDemoEndpoint+"/"+bucketNameWithPrefix+"/"+gzipFileName) + } + } + + syncResult.UploadResult.AllDownloadUrl = allDownloadUrl + syncResult.UploadResult.ErrorUploadImageList = errorUploadOssGzipNameList + + } + + utils.AppendContentToFile(utils.BeautifulPrintToString(syncResult), filepath.Join(gzipLocalFolderPath, utils.TimeSplitFormatString()+".json")) +} + +func generateMonolithicGzipFileName(syncEntity *ImageSyncEntity) string { + + return strings.TrimPrefix(syncEntity.CompressCondition.GzipLocalFolder, image.OfflineImageGzipFolderPrefix) + ".tar.gz" +} + +// A_DownloadLoadTagUpload DLTU procedure 
ImageSync的另外一般流程,需要支持 堡垒机(纯离线)的模式 +// 2. Gzip文件目录,RKE MIDDLE CMII三个文件目录 - 约定目录 +// 约定目录 /root/wdd/image/rke/ /root/wdd/image/middle/ /root/wdd/image/cmii/ +// 3. 读取本机的IP地址 - 参数传递 +// 4. OSS地址 - ossUrlPrefix传空 则使用默认值 +// 5. ossFileName - 如果结尾为txt,则为文件的形式,如果为tar.gz,则为gzip文件夹的形式 +func A_DownloadLoadTagUpload(downloadFromOss bool, ossUrlPrefix, ossFileNameOrGzipFileListTxt, localGzipFolderOrGzipFile string, targetHarborFullName string) (targetImageFullNameList []string) { + + // 支持单文件的形式 + if !utils.IsDirOrFile(localGzipFolderOrGzipFile) { + // 单个压缩文件 肯定是离线的形式 + if !strings.HasSuffix(localGzipFolderOrGzipFile, ".tar.gz") { + log.ErrorF("local gzip file %s is not a .tar.gz file !", localGzipFolderOrGzipFile) + return nil + } + + // load + image.LoadFromGzipFilePath(localGzipFolderOrGzipFile) + } else { + separator := os.PathSeparator + if !strings.HasSuffix(localGzipFolderOrGzipFile, string(separator)) { + localGzipFolderOrGzipFile += string(separator) + } + + // download + if downloadFromOss { + if !parseAndDownloadFromOss(ossUrlPrefix, ossFileNameOrGzipFileListTxt, localGzipFolderOrGzipFile) { + log.ErrorF("download from oss error !") + return nil + } + } + + // load + loadAllGzipImageFromLocalFolder(localGzipFolderOrGzipFile) + } + + // tag + // push + allFileInFolder, err := utils.ListAllFileInFolder(localGzipFolderOrGzipFile) + if err != nil { + return nil + } + for _, gzipFileName := range allFileInFolder { + // 过滤非.tar.gz结尾的文件 + if !strings.HasSuffix(gzipFileName, ".tar.gz") { + continue + } + + log.DebugF("gzip file name is %s", gzipFileName) + + // gzip to image full name 拿到镜像的原始名称 + imageFullName := image2.GzipFileNameToImageFullName(gzipFileName) + if imageFullName == "" { + log.ErrorF("gzip file %s to image full name error !", gzipFileName) + continue + } + + // tag 拿到目标名称 然后重新Tag + targetImageFullName := image2.ImageNameToTargetImageFullName(imageFullName, targetHarborFullName) + image.TagFromSourceToTarget(imageFullName, targetImageFullName) + + // 
uploadToHarbor 上传到目标Harbor + if image.UploadToHarbor(targetImageFullName) { + targetImageFullNameList = append(targetImageFullNameList, targetImageFullName) + } else { + log.ErrorF("upload to harbor error of %s", targetImageFullName) + } + } + + return targetImageFullNameList +} + +func loadAllGzipImageFromLocalFolder(localGzipFolder string) { + image.LoadFromFolderPath(localGzipFolder) +} + +func parseAndDownloadFromOss(ossUrlPrefix, ossFileName, localGzipFolder string) bool { + + if ossUrlPrefix == "" { + ossUrlPrefix = DefaultOssUrlPrefix + } + if !strings.HasSuffix(ossUrlPrefix, "/") { + ossUrlPrefix += "/" + } + + log.InfoF("prepare to download from %s%s", ossUrlPrefix, ossFileName) + + if !DefaultCmiiMinioOperator.DemoMinioOperator.DownloadFileFromOssFullUrl(ossUrlPrefix+ossFileName, localGzipFolder) { + log.ErrorF("download %s from oss error !", ossUrlPrefix+ossFileName) + return false + } + + if strings.HasSuffix(ossFileName, ".txt") { + // download from gzip file list txt + // download all files in the txt file + result := utils.ReadAllContentFromFile(localGzipFolder + ossFileName) + for _, gzipFileName := range result { + DefaultCmiiMinioOperator.DemoMinioOperator.DownloadFileFromOssFullUrl(ossUrlPrefix+gzipFileName, localGzipFolder) + } + } + + // 解析 + return true +} + +// C_DownloadCompressUploadFromDemo 获取DEMO环境的全部镜像 +func C_DownloadCompressUploadFromDemo(syncEntity *ImageSyncEntity, syncResult *ImageSyncResult) { + + // generate a project folder + projectName := syncEntity.DownloadCondition.ProjectName + gzipFolderLocalPath := filepath.Join(image.OfflineImageGzipFolderPrefix, projectName) + + err := os.MkdirAll(gzipFolderLocalPath, os.ModeDir) + if err != nil { + if !errors.Is(err, os.ErrExist) { + log.ErrorF("[Download_Compress_Upload_From_Demo] - create folder of %s error %s", gzipFolderLocalPath, err.Error()) + + } + } + + // get demo image version map + allCmiiImageFullNameList := buildAllCmiiImageNameListFromDemo(projectName) + + // save all cmii 
image to file + allPullImageNameTxtFileName := filepath.Join(gzipFolderLocalPath, AllCmiiImageListLocalFileName) + utils.AppendOverwriteListContentToFile(allCmiiImageFullNameList, allPullImageNameTxtFileName) + + syncEntity.CompressCondition.GzipLocalFolder = gzipFolderLocalPath + syncEntity.DownloadCondition.FullNameImageList = allCmiiImageFullNameList + + // save to result + syncResult.DownloadResult.SuccessPullTxtFileLocalFullPath = allPullImageNameTxtFileName + + // do work + // DCU + A_DownloadCompressUpload(syncEntity, syncResult) +} + +func buildAllCmiiImageNameListFromDemo(projectName string) []string { + + var realCmiiImageName []string + + backendMap, frontendMap, srsMap := BackupAllCmiiDeploymentToMap(demo) + + // save map to file + backendMapFile := image.OfflineImageGzipFolderPrefix + projectName + "-backend-app.json" + frontendMapFile := image.OfflineImageGzipFolderPrefix + projectName + "-frontend-app.json" + srsMapFile := image.OfflineImageGzipFolderPrefix + projectName + "-srs-app.json" + _ = os.Remove(backendMapFile) + _ = os.Remove(frontendMapFile) + _ = os.Remove(srsMapFile) + + //utils.AppendContentToFile( + // utils.BeautifulPrintToString(backendMap), + // backendMapFile, + //) + //utils.AppendContentToFile( + // utils.BeautifulPrintToString(frontendMap), + // frontendMapFile, + //) + //utils.AppendContentToFile( + // utils.BeautifulPrintToString(srsMapFile), + // srsMapFile, + //) + + realCmiiImageName = append(realCmiiImageName, image.CmiiImageMapToFullNameList(backendMap)...) + realCmiiImageName = append(realCmiiImageName, image.CmiiImageMapToFullNameList(frontendMap)...) + realCmiiImageName = append(realCmiiImageName, image.CmiiImageMapToFullNameList(srsMap)...) 
+ + utils.BeautifulPrintListWithTitle(realCmiiImageName, "Cmii Project Image => "+projectName) + + return realCmiiImageName +} + +// C_DownloadCompressUploadFromVersion 根据版本下载全部的CMII镜像 +func C_DownloadCompressUploadFromVersion(syncEntity *ImageSyncEntity, syncResult *ImageSyncResult) { + + // generate a project folder + projectCmiiVersion := syncEntity.DownloadCondition.ProjectVersion + + // gzip local path + gzipFolderLocalPath := filepath.Join(image.OfflineImageGzipFolderPrefix, projectCmiiVersion) + + err := os.MkdirAll(gzipFolderLocalPath, os.ModeDir) + if err != nil { + if !errors.Is(err, os.ErrExist) { + log.ErrorF("[Download_Compress_Upload_From_Demo] - create folder of %s error %s", gzipFolderLocalPath, err.Error()) + } + } + + syncEntity.CompressCondition.GzipLocalFolder = gzipFolderLocalPath + + // build all cmii image name list + allCmiiImageFullNameList := buildAllCmiiImageNameListFromVersion(projectCmiiVersion) + + // save all cmii image to file + allImageListTxtFileFullName := filepath.Join(gzipFolderLocalPath, AllCmiiImageListLocalFileName) + utils.AppendOverwriteContentToFile(utils.BeautifulPrintToString(allCmiiImageFullNameList), allImageListTxtFileFullName) + + // save to result + syncResult.DownloadResult.SuccessPullTxtFileLocalFullPath = allImageListTxtFileFullName + + // do work + // DCU procedure + A_DownloadCompressUpload(syncEntity, syncResult) +} + +// buildAllCmiiImageNameListFromVersion 根据VersionTag构建完整的应用名称列表 +func buildAllCmiiImageNameListFromVersion(cmiiVersion string) []string { + + var realCmiiImageName []string + + backendMap := d_app.CmiiBackendAppMap + frontendMap := d_app.CmiiFrontendAppMap + + for app := range backendMap { + backendMap[app] = cmiiVersion + } + for app := range frontendMap { + frontendMap[app] = cmiiVersion + } + + realCmiiImageName = append(realCmiiImageName, image.CmiiImageMapToFullNameList(backendMap)...) + realCmiiImageName = append(realCmiiImageName, image.CmiiImageMapToFullNameList(frontendMap)...) 
+ + for key, value := range d_app.CmiiSrsAppMap { + var app *CmiiDeploymentInterface + if strings.Contains(value, "deployment") { + app = DefaultCmiiOperator.DeploymentOneInterface(demo, key) + if app != nil { + realCmiiImageName = append(realCmiiImageName, app.Image) + } + } else if strings.Contains(value, "state") { + app = DefaultCmiiOperator.StatefulSetOneInterface(demo, key) + if app != nil { + for _, imageName := range app.ContainerImageMap { + realCmiiImageName = append(realCmiiImageName, imageName) + } + } + } + } + utils.BeautifulPrintListWithTitle(realCmiiImageName, "Cmii Version Image => "+cmiiVersion) + return realCmiiImageName +} + +// C_DownloadCompressUploadDependency DCU所有的依赖镜像 +func C_DownloadCompressUploadDependency(shouldGzip bool, shouldOss bool, isRKE bool) (errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList []string) { + + log.Info("DCU for middle and rke!") + err := os.MkdirAll(image.OfflineImageGzipFolderPrefix, os.ModeDir) + if err != nil { + if !errors.Is(err, os.ErrExist) { + log.ErrorF("[FetchDependencyRepos] - create folder of %s error %s", image.OfflineImageGzipFolderPrefix, err.Error()) + } + } + + var fullImageNameList []string + var gzipFolderPrefix string + + if isRKE { + log.Info("DCU for rke!") + fullImageNameList = d_app.Rancher1204Amd64 + gzipFolderPrefix = image.OfflineImageGzipFolderPrefix + "rke/" + } else { + log.Info("DCU for middle!") + + fullImageNameList = d_app.MiddlewareAmd64 + gzipFolderPrefix = image.OfflineImageGzipFolderPrefix + "middle/" + } + + syncEntity := &ImageSyncEntity{ + DownloadCondition: &DownloadEntity{ + ShouldDownloadImage: true, + ProjectName: "", + ProjectVersion: "", + CmiiNameTagList: nil, + FullNameImageList: fullImageNameList, + DownloadAuthUserName: "", + DownloadAuthPassword: "", + }, + CompressCondition: &CompressEntity{ + ShouldCompressImageToGzip: shouldGzip, + ShouldGzipSplit: true, + GzipLocalFolder: gzipFolderPrefix, + }, + UploadCondition: 
&UploadEntity{ShouldUploadToDemoMinio: shouldOss}, + ShouldDownloadFromOss: false, + ShouldUpdateImageTag: false, + ShouldDirectPushToHarbor: false, + DirectHarborHost: "", + } + + syncResult := &ImageSyncResult{ + ProcedureSuccessImageList: nil, + DownloadResult: &DownloadResultEntity{ + ErrorPullImageList: nil, + SuccessPullImageList: nil, + SuccessPullTxtFileLocalFullPath: "", + }, + CompressResult: &CompressResultEntity{ + ErrorGzipImageList: nil, + SuccessGzipImageList: nil, + GzipTxtFileLocalFullPath: "", + }, + UploadResult: &UploadResultEntity{ + ErrorUploadImageList: nil, + AllDownloadUrl: nil, + }, + } + + utils.AppendOverwriteListContentToFile(fullImageNameList, filepath.Join(gzipFolderPrefix, AllCmiiImageListLocalFileName)) + + A_DownloadCompressUpload(syncEntity, syncResult) + + return syncResult.DownloadResult.ErrorPullImageList, syncResult.CompressResult.ErrorGzipImageList, syncResult.ProcedureSuccessImageList, syncResult.CompressResult.SuccessGzipImageList +} + +func LoadSplitCmiiGzipImageToTargetHarbor(projectName, targetHarborHost string) (errorLoadImageNameList, errorPushImageNameList []string) { + + // list folder + projectGzipFolder := image.OfflineImageGzipFolderPrefix + projectName + errorLoadImageNameList = append(errorLoadImageNameList, image.LoadFromFolderPath(projectGzipFolder)...) + // read from json + errorPushImageNameList = append(errorPushImageNameList, image.TagFromListAndPushToCHarbor(d_app.Cmii520DemoImageList, targetHarborHost)...) + + // re-tag + // push + + // todo clean host and harbor + // check harbor exits + + return errorLoadImageNameList, errorPushImageNameList +} + +func LoadSplitDepGzipImageToTargetHarbor(targetHarborHost string) (errorLoadImageNameList []string, errorPushImageNameList []string) { + + errorPushImageNameList = append(errorPushImageNameList, image.TagFromListAndPushToCHarbor(d_app.MiddlewareAmd64, targetHarborHost)...) 
+ //errorPushImageNameList = append(errorPushImageNameList, image.TagFromListAndPushToCHarbor(d_app.Rancher1204Amd64, targetHarborHost)...) + + return errorLoadImageNameList, errorPushImageNameList + +} diff --git a/agent-operator/ImageSyncOperator.svg b/agent-operator/ImageSyncOperator.svg new file mode 100644 index 0000000..12f6d29 --- /dev/null +++ b/agent-operator/ImageSyncOperator.svg @@ -0,0 +1,582 @@ + + + + + + + + + + + + + + + + + + + + + +
+
+
+ Download +
+
+
+
+ Download + +
+
+
+
+ + + + + + + + +
+
+
+ Compress +
+
+
+
+ Compress + +
+
+
+
+ + + + + + + + +
+
+
+ Upload +
+
+
+
+ Upload + +
+
+
+
+ + + + + + + + +
+
+
+ DEMO +
+
+
+
+ DEMO + +
+
+
+
+ + + + + + + + +
+
+
+ VersionTag +
+
+
+
+ VersionTag + +
+
+
+
+ + + + + + + + +
+
+
+ Split +
+
+
+
+ Split + +
+
+
+
+ + + + + + + + +
+
+
+ monolithic +
+
+
+
+ monolithic + +
+
+
+
+ + + + + + + + +
+
+
+ Path +
+
+
+
+ Path + +
+
+
+
+ + + + + + + + + + + + + + + +
+
+
+ should +
+
+
+
+ should + +
+
+
+
+
+ + + + + + + + +
+
+
+ AUTH +
+
+
+
+ AUTH + +
+
+
+
+ + + + + + + + +
+
+
+ Local Path +
+
+
+
+ Local Path + +
+
+
+
+ + + + + + + + + + + + + +
+
+
+
+
+                                                        
+                                                            ErrorPullImageList
+                                                            
+                                                                
+
+
+
+
+
+
+
+
+ ErrorPullImageList + +
+
+
+
+ + + + + + + + +
+
+
+
+
+                                                        
+                                                            
+                                                                ErrorGzipImageList
+                                                            
+                                                            
+                                                                
+
+
+
+
+
+
+
+
+ ErrorGzipImageList + +
+
+
+
+ + + + + + + + +
+
+
+ GzipLocalFolder +
+
+
+
+ GzipLocalFolder + +
+
+
+
+ + + + + + + + +
+
+
+
+                                                    ProcedureSuccessImageList
+                                                
+
+
+
+
+ ProcedureSuccessImageList + +
+
+
+
+ + + + + + + + +
+
+
+
+ SuccessPulledImagList + +
+
+
+ +
+
+
+
[FILE]- GzipLocalFolder+name + +
+
+
+
+
+
+
+ SuccessPulledImagList... + +
+
+
+
+ + + + + + + + +
+
+
+ AllDownloadUrl +
+
+
+
+ AllDownloadUrl + +
+
+
+
+ + + + + + + + +
+
+
+ ErrorUploadImageList +
+
+
+
+ ErrorUploadImageList + +
+
+
+
+
+
+
+ + + + Text is not SVG - cannot display + + +
\ No newline at end of file diff --git a/agent-operator/CmiiOperator_test.go b/agent-operator/ImageSyncOperator_test.go similarity index 79% rename from agent-operator/CmiiOperator_test.go rename to agent-operator/ImageSyncOperator_test.go index ae88876..0caa615 100755 --- a/agent-operator/CmiiOperator_test.go +++ b/agent-operator/ImageSyncOperator_test.go @@ -11,7 +11,7 @@ import ( func TestFetchDependencyRepos_Middle(t *testing.T) { - errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList := C_DownloadCompressUploadDependency(true, true, false, false) + errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList := C_DownloadCompressUploadDependency(true, false, false) utils.BeautifulPrintListWithTitle(errorPullImageList, "errorPullImageList") utils.BeautifulPrintListWithTitle(errorGzipImageList, "errorGzipImageList") @@ -22,7 +22,7 @@ func TestFetchDependencyRepos_Middle(t *testing.T) { func TestFetchDependencyRepos_RKE(t *testing.T) { - errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList := C_DownloadCompressUploadDependency(true, true, false, true) + errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList := C_DownloadCompressUploadDependency(true, true, false) utils.BeautifulPrintListWithTitle(errorPullImageList, "errorPullImageList") utils.BeautifulPrintListWithTitle(errorGzipImageList, "errorGzipImageList") @@ -48,17 +48,28 @@ func TestLoadSplitDepGzipImageToTargetHarbor(t *testing.T) { func TestPullFromEntityAndSyncConditionally(t *testing.T) { // 创建一个模拟的sync对象,用于测试函数的行为。这里需要根据你的实际需求来设置mock数据和预期结果。 sync := ImageSyncEntity{ - CmiiNameTagList: []string{ - //"cmii-uav-mqtthandler:5.4.0-bjdyt-052102", + DownloadCondition: &DownloadEntity{ + ShouldDownloadImage: true, + ProjectName: "bjyd", + ProjectVersion: "", + CmiiNameTagList: []string{ + //"cmii-uav-mqtthandler:5.4.0-bjdyt-052102", + }, + FullNameImageList: nil, + DownloadAuthUserName: "", + DownloadAuthPassword: 
"", }, - FullNameImageList: nil, - ProjectVersion: "", - ProjectName: "cqsh", - DirectHarborHost: "chongqingcis-9b4a3da9.ecis.chongqing-1.cmecloud.cn", - ShouldDownloadImage: true, - ShouldCompressImageToGzip: false, - ShouldUploadToDemoMinio: false, - ShouldDirectPushToHarbor: true, + + CompressCondition: &CompressEntity{ + ShouldCompressImageToGzip: true, + ShouldGzipSplit: true, + GzipLocalFolder: "", + }, + UploadCondition: &UploadEntity{ + ShouldUploadToDemoMinio: true, + }, + DirectHarborHost: "harbor.wdd.io:8033", + ShouldDirectPushToHarbor: false, } // 调用函数并获取结果。这里需要根据你的实际需求来验证返回的结果是否符合预期。 @@ -67,9 +78,6 @@ func TestPullFromEntityAndSyncConditionally(t *testing.T) { utils.BeautifulPrint(result) // 添加断言以检查函数的输出,例如: - if len(result.ErrorPullImageList) != 0 { - t.Errorf("Expected no error pulling images, got %v", result.ErrorPullImageList) - } // ...其他验证逻辑... } @@ -110,7 +118,15 @@ func TestConcatAndUniformCmiiImage(t *testing.T) { func TestImageSyncEntity_PullFromEntityAndSyncConditionally(t *testing.T) { imageSyncEntity := ImageSyncEntity{ - ProjectVersion: "5.4.0", + DownloadCondition: &DownloadEntity{ + ShouldDownloadImage: true, + ProjectName: "", + ProjectVersion: "5.4.0", + CmiiNameTagList: nil, + FullNameImageList: nil, + DownloadAuthUserName: "", + DownloadAuthPassword: "", + }, DirectHarborHost: "36.134.71.138", } diff --git a/agent-operator/K8sOperator.go b/agent-operator/K8sOperator.go index 52fa710..29b702b 100755 --- a/agent-operator/K8sOperator.go +++ b/agent-operator/K8sOperator.go @@ -504,6 +504,7 @@ func (op *CmiiK8sOperator) DeploymentUpdateTagByImageFullName(cmiiEnv, imageFull return op.DeploymentUpdateTag(cmiiEnv, appName, newTag) } +// DeploymentUpdateTag 更新一个Deployment的Tag,返回true或者false。 同时更新IMAGE_VERSION BIZ_GROUP func (op *CmiiK8sOperator) DeploymentUpdateTag(cmiiEnv, appName, newTag string) bool { if newTag == "" { @@ -545,15 +546,15 @@ func (op *CmiiK8sOperator) DeploymentUpdateTag(cmiiEnv, appName, newTag string) tagVersion = 
strings.Split(newTag, "-")[0] } envList := container.Env - for _, envVar := range envList { + for index, envVar := range envList { if envVar.Name == "IMAGE_VERSION" { - envVar.Value = tagVersion + envList[index].Value = tagVersion } if envVar.Name == "BIZ_CONFIG_GROUP" { - envVar.Value = tagVersion + envList[index].Value = tagVersion } if envVar.Name == "SYS_CONFIG_GROUP" { - envVar.Value = tagVersion + envList[index].Value = tagVersion } } log.DebugF("[DeploymentUpdateTag] - update env IMAGE_VERSION to [%s]", tagVersion) diff --git a/agent-operator/K8sOperator_test.go b/agent-operator/K8sOperator_test.go index 1f65b8a..4ee0745 100755 --- a/agent-operator/K8sOperator_test.go +++ b/agent-operator/K8sOperator_test.go @@ -84,7 +84,7 @@ func TestCmiiK8sOperator_DeploymentScale(t *testing.T) { func TestCmiiK8sOperator_DeploymentUpdateTag(t *testing.T) { start := time.Now() - DefaultCmiiOperator.DeploymentUpdateTag("demo", "cmii-uav-platform", "5.2.0-011001") + DefaultCmiiOperator.DeploymentUpdateTag("demo", "cmii-uav-gateway", "5.7.0") elapsed := time.Since(start).Milliseconds() fmt.Printf("执行耗时: %d ms\n", elapsed) } diff --git a/agent-operator/deploy/z_bjtg/k8s-dashboard.yaml b/agent-operator/deploy/z_bjtg/k8s-dashboard.yaml deleted file mode 100755 index 430a581..0000000 --- a/agent-operator/deploy/z_bjtg/k8s-dashboard.yaml +++ /dev/null @@ -1,1842 +0,0 @@ ---- -apiVersion: v1 -kind: Namespace -metadata: - name: kube-system - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system - ---- - -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - kubernetes.io/cluster-service: "true" - name: kubernetes-dashboard - namespace: kube-system -spec: - ports: - - port: 443 - targetPort: 8443 - nodePort: 30554 - selector: - k8s-app: kubernetes-dashboard - type: NodePort - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: 
kubernetes-dashboard - name: kubernetes-dashboard-certs - namespace: kube-system -type: Opaque - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-csrf - namespace: kube-system -type: Opaque -data: - csrf: "" - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-key-holder - namespace: kube-system -type: Opaque - ---- - -kind: ConfigMap -apiVersion: v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-settings - namespace: kube-system - ---- - -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system -rules: - # Allow Dashboard to get, update and delete Dashboard exclusive secrets. - - apiGroups: [ "" ] - resources: [ "secrets" ] - resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf" ] - verbs: [ "get", "update", "delete" ] - # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. - - apiGroups: [ "" ] - resources: [ "configmaps" ] - resourceNames: [ "kubernetes-dashboard-settings" ] - verbs: [ "get", "update" ] - # Allow Dashboard to get metrics. 
- - apiGroups: [ "" ] - resources: [ "services" ] - resourceNames: [ "heapster", "dashboard-metrics-scraper" ] - verbs: [ "proxy" ] - - apiGroups: [ "" ] - resources: [ "services/proxy" ] - resourceNames: [ "heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper" ] - verbs: [ "get" ] - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard -rules: - # Allow Metrics Scraper to get metrics from the Metrics server - - apiGroups: [ "metrics.k8s.io" ] - resources: [ "pods", "nodes" ] - verbs: [ "get", "list", "watch" ] - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: kubernetes-dashboard -subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kube-system - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: kubernetes-dashboard -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kubernetes-dashboard -subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kube-system - ---- - -kind: Deployment -apiVersion: apps/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-app: kubernetes-dashboard - template: - metadata: - labels: - k8s-app: kubernetes-dashboard - spec: - containers: - - name: kubernetes-dashboard - image: 10.250.0.110:8033/cmii/dashboard:v2.0.1 - ports: - - containerPort: 8443 - protocol: TCP - args: - - --auto-generate-certificates - - --namespace=kube-system - # Uncomment the following line to manually specify Kubernetes API server Host - # If not specified, Dashboard will 
attempt to auto discover the API server and connect - # to it. Uncomment only if the default does not work. - # - --apiserver-host=http://my-address:port - volumeMounts: - - name: kubernetes-dashboard-certs - mountPath: /certs - # Create on-disk volume to store exec logs - - mountPath: /tmp - name: tmp-volume - livenessProbe: - httpGet: - scheme: HTTPS - path: / - port: 8443 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 1001 - runAsGroup: 2001 - volumes: - - name: kubernetes-dashboard-certs - secret: - secretName: kubernetes-dashboard-certs - - name: tmp-volume - emptyDir: { } - serviceAccountName: kubernetes-dashboard - # Comment the following tolerations if Dashboard must not be deployed on master - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - ---- - -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: dashboard-metrics-scraper - name: dashboard-metrics-scraper - namespace: kube-system -spec: - ports: - - port: 8000 - targetPort: 8000 - selector: - k8s-app: dashboard-metrics-scraper - ---- - -kind: Deployment -apiVersion: apps/v1 -metadata: - labels: - k8s-app: dashboard-metrics-scraper - name: dashboard-metrics-scraper - namespace: kube-system -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-app: dashboard-metrics-scraper - template: - metadata: - labels: - k8s-app: dashboard-metrics-scraper - annotations: - seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' - spec: - containers: - - name: dashboard-metrics-scraper - image: 10.250.0.110:8033/cmii/metrics-scraper:v1.0.4 - ports: - - containerPort: 8000 - protocol: TCP - livenessProbe: - httpGet: - scheme: HTTP - path: / - port: 8000 - initialDelaySeconds: 30 - timeoutSeconds: 30 - volumeMounts: - - mountPath: /tmp - name: tmp-volume - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 1001 - 
runAsGroup: 2001 - serviceAccountName: kubernetes-dashboard - # Comment the following tolerations if Dashboard must not be deployed on master - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - volumes: - - name: tmp-volume - emptyDir: { } ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: admin-user - namespace: kube-system - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: admin-user -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: - - kind: ServiceAccount - name: admin-user - namespace: kube-system ---- -apiVersion: v1 -kind: Namespace -metadata: - name: kube-system - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system - ---- - -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - kubernetes.io/cluster-service: "true" - name: kubernetes-dashboard - namespace: kube-system -spec: - ports: - - port: 443 - targetPort: 8443 - nodePort: 30554 - selector: - k8s-app: kubernetes-dashboard - type: NodePort - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-certs - namespace: kube-system -type: Opaque - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-csrf - namespace: kube-system -type: Opaque -data: - csrf: "" - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-key-holder - namespace: kube-system -type: Opaque - ---- - -kind: ConfigMap -apiVersion: v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-settings - namespace: kube-system - ---- - -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard 
- namespace: kube-system -rules: - # Allow Dashboard to get, update and delete Dashboard exclusive secrets. - - apiGroups: [ "" ] - resources: [ "secrets" ] - resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf" ] - verbs: [ "get", "update", "delete" ] - # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. - - apiGroups: [ "" ] - resources: [ "configmaps" ] - resourceNames: [ "kubernetes-dashboard-settings" ] - verbs: [ "get", "update" ] - # Allow Dashboard to get metrics. - - apiGroups: [ "" ] - resources: [ "services" ] - resourceNames: [ "heapster", "dashboard-metrics-scraper" ] - verbs: [ "proxy" ] - - apiGroups: [ "" ] - resources: [ "services/proxy" ] - resourceNames: [ "heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper" ] - verbs: [ "get" ] - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard -rules: - # Allow Metrics Scraper to get metrics from the Metrics server - - apiGroups: [ "metrics.k8s.io" ] - resources: [ "pods", "nodes" ] - verbs: [ "get", "list", "watch" ] - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: kubernetes-dashboard -subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kube-system - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: kubernetes-dashboard -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kubernetes-dashboard -subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kube-system - ---- - -kind: Deployment -apiVersion: apps/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - 
name: kubernetes-dashboard - namespace: kube-system -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-app: kubernetes-dashboard - template: - metadata: - labels: - k8s-app: kubernetes-dashboard - spec: - containers: - - name: kubernetes-dashboard - image: 10.250.0.110:8033/cmii/dashboard:v2.0.1 - ports: - - containerPort: 8443 - protocol: TCP - args: - - --auto-generate-certificates - - --namespace=kube-system - # Uncomment the following line to manually specify Kubernetes API server Host - # If not specified, Dashboard will attempt to auto discover the API server and connect - # to it. Uncomment only if the default does not work. - # - --apiserver-host=http://my-address:port - volumeMounts: - - name: kubernetes-dashboard-certs - mountPath: /certs - # Create on-disk volume to store exec logs - - mountPath: /tmp - name: tmp-volume - livenessProbe: - httpGet: - scheme: HTTPS - path: / - port: 8443 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 1001 - runAsGroup: 2001 - volumes: - - name: kubernetes-dashboard-certs - secret: - secretName: kubernetes-dashboard-certs - - name: tmp-volume - emptyDir: { } - serviceAccountName: kubernetes-dashboard - # Comment the following tolerations if Dashboard must not be deployed on master - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - ---- - -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: dashboard-metrics-scraper - name: dashboard-metrics-scraper - namespace: kube-system -spec: - ports: - - port: 8000 - targetPort: 8000 - selector: - k8s-app: dashboard-metrics-scraper - ---- - -kind: Deployment -apiVersion: apps/v1 -metadata: - labels: - k8s-app: dashboard-metrics-scraper - name: dashboard-metrics-scraper - namespace: kube-system -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-app: dashboard-metrics-scraper - template: - 
metadata: - labels: - k8s-app: dashboard-metrics-scraper - annotations: - seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' - spec: - containers: - - name: dashboard-metrics-scraper - image: 10.250.0.110:8033/cmii/metrics-scraper:v1.0.4 - ports: - - containerPort: 8000 - protocol: TCP - livenessProbe: - httpGet: - scheme: HTTP - path: / - port: 8000 - initialDelaySeconds: 30 - timeoutSeconds: 30 - volumeMounts: - - mountPath: /tmp - name: tmp-volume - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 1001 - runAsGroup: 2001 - serviceAccountName: kubernetes-dashboard - # Comment the following tolerations if Dashboard must not be deployed on master - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - volumes: - - name: tmp-volume - emptyDir: { } ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: admin-user - namespace: kube-system - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: admin-user -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: - - kind: ServiceAccount - name: admin-user - namespace: kube-system ---- -apiVersion: v1 -kind: Namespace -metadata: - name: kube-system - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system - ---- - -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - kubernetes.io/cluster-service: "true" - name: kubernetes-dashboard - namespace: kube-system -spec: - ports: - - port: 443 - targetPort: 8443 - nodePort: 30554 - selector: - k8s-app: kubernetes-dashboard - type: NodePort - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-certs - namespace: kube-system -type: Opaque - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: 
kubernetes-dashboard - name: kubernetes-dashboard-csrf - namespace: kube-system -type: Opaque -data: - csrf: "" - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-key-holder - namespace: kube-system -type: Opaque - ---- - -kind: ConfigMap -apiVersion: v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-settings - namespace: kube-system - ---- - -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system -rules: - # Allow Dashboard to get, update and delete Dashboard exclusive secrets. - - apiGroups: [ "" ] - resources: [ "secrets" ] - resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf" ] - verbs: [ "get", "update", "delete" ] - # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. - - apiGroups: [ "" ] - resources: [ "configmaps" ] - resourceNames: [ "kubernetes-dashboard-settings" ] - verbs: [ "get", "update" ] - # Allow Dashboard to get metrics. 
- - apiGroups: [ "" ] - resources: [ "services" ] - resourceNames: [ "heapster", "dashboard-metrics-scraper" ] - verbs: [ "proxy" ] - - apiGroups: [ "" ] - resources: [ "services/proxy" ] - resourceNames: [ "heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper" ] - verbs: [ "get" ] - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard -rules: - # Allow Metrics Scraper to get metrics from the Metrics server - - apiGroups: [ "metrics.k8s.io" ] - resources: [ "pods", "nodes" ] - verbs: [ "get", "list", "watch" ] - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: kubernetes-dashboard -subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kube-system - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: kubernetes-dashboard -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kubernetes-dashboard -subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kube-system - ---- - -kind: Deployment -apiVersion: apps/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-app: kubernetes-dashboard - template: - metadata: - labels: - k8s-app: kubernetes-dashboard - spec: - containers: - - name: kubernetes-dashboard - image: 10.250.0.110:8033/cmii/dashboard:v2.0.1 - ports: - - containerPort: 8443 - protocol: TCP - args: - - --auto-generate-certificates - - --namespace=kube-system - # Uncomment the following line to manually specify Kubernetes API server Host - # If not specified, Dashboard will 
attempt to auto discover the API server and connect - # to it. Uncomment only if the default does not work. - # - --apiserver-host=http://my-address:port - volumeMounts: - - name: kubernetes-dashboard-certs - mountPath: /certs - # Create on-disk volume to store exec logs - - mountPath: /tmp - name: tmp-volume - livenessProbe: - httpGet: - scheme: HTTPS - path: / - port: 8443 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 1001 - runAsGroup: 2001 - volumes: - - name: kubernetes-dashboard-certs - secret: - secretName: kubernetes-dashboard-certs - - name: tmp-volume - emptyDir: { } - serviceAccountName: kubernetes-dashboard - # Comment the following tolerations if Dashboard must not be deployed on master - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - ---- - -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: dashboard-metrics-scraper - name: dashboard-metrics-scraper - namespace: kube-system -spec: - ports: - - port: 8000 - targetPort: 8000 - selector: - k8s-app: dashboard-metrics-scraper - ---- - -kind: Deployment -apiVersion: apps/v1 -metadata: - labels: - k8s-app: dashboard-metrics-scraper - name: dashboard-metrics-scraper - namespace: kube-system -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-app: dashboard-metrics-scraper - template: - metadata: - labels: - k8s-app: dashboard-metrics-scraper - annotations: - seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' - spec: - containers: - - name: dashboard-metrics-scraper - image: 10.250.0.110:8033/cmii/metrics-scraper:v1.0.4 - ports: - - containerPort: 8000 - protocol: TCP - livenessProbe: - httpGet: - scheme: HTTP - path: / - port: 8000 - initialDelaySeconds: 30 - timeoutSeconds: 30 - volumeMounts: - - mountPath: /tmp - name: tmp-volume - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 1001 - 
runAsGroup: 2001 - serviceAccountName: kubernetes-dashboard - # Comment the following tolerations if Dashboard must not be deployed on master - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - volumes: - - name: tmp-volume - emptyDir: { } ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: admin-user - namespace: kube-system - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: admin-user -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: - - kind: ServiceAccount - name: admin-user - namespace: kube-system ---- -apiVersion: v1 -kind: Namespace -metadata: - name: kube-system - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system - ---- - -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - kubernetes.io/cluster-service: "true" - name: kubernetes-dashboard - namespace: kube-system -spec: - ports: - - port: 443 - targetPort: 8443 - nodePort: 30554 - selector: - k8s-app: kubernetes-dashboard - type: NodePort - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-certs - namespace: kube-system -type: Opaque - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-csrf - namespace: kube-system -type: Opaque -data: - csrf: "" - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-key-holder - namespace: kube-system -type: Opaque - ---- - -kind: ConfigMap -apiVersion: v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-settings - namespace: kube-system - ---- - -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard 
- namespace: kube-system -rules: - # Allow Dashboard to get, update and delete Dashboard exclusive secrets. - - apiGroups: [ "" ] - resources: [ "secrets" ] - resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf" ] - verbs: [ "get", "update", "delete" ] - # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. - - apiGroups: [ "" ] - resources: [ "configmaps" ] - resourceNames: [ "kubernetes-dashboard-settings" ] - verbs: [ "get", "update" ] - # Allow Dashboard to get metrics. - - apiGroups: [ "" ] - resources: [ "services" ] - resourceNames: [ "heapster", "dashboard-metrics-scraper" ] - verbs: [ "proxy" ] - - apiGroups: [ "" ] - resources: [ "services/proxy" ] - resourceNames: [ "heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper" ] - verbs: [ "get" ] - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard -rules: - # Allow Metrics Scraper to get metrics from the Metrics server - - apiGroups: [ "metrics.k8s.io" ] - resources: [ "pods", "nodes" ] - verbs: [ "get", "list", "watch" ] - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: kubernetes-dashboard -subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kube-system - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: kubernetes-dashboard -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kubernetes-dashboard -subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kube-system - ---- - -kind: Deployment -apiVersion: apps/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - 
name: kubernetes-dashboard - namespace: kube-system -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-app: kubernetes-dashboard - template: - metadata: - labels: - k8s-app: kubernetes-dashboard - spec: - containers: - - name: kubernetes-dashboard - image: 10.250.0.110:8033/cmii/dashboard:v2.0.1 - ports: - - containerPort: 8443 - protocol: TCP - args: - - --auto-generate-certificates - - --namespace=kube-system - # Uncomment the following line to manually specify Kubernetes API server Host - # If not specified, Dashboard will attempt to auto discover the API server and connect - # to it. Uncomment only if the default does not work. - # - --apiserver-host=http://my-address:port - volumeMounts: - - name: kubernetes-dashboard-certs - mountPath: /certs - # Create on-disk volume to store exec logs - - mountPath: /tmp - name: tmp-volume - livenessProbe: - httpGet: - scheme: HTTPS - path: / - port: 8443 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 1001 - runAsGroup: 2001 - volumes: - - name: kubernetes-dashboard-certs - secret: - secretName: kubernetes-dashboard-certs - - name: tmp-volume - emptyDir: { } - serviceAccountName: kubernetes-dashboard - # Comment the following tolerations if Dashboard must not be deployed on master - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - ---- - -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: dashboard-metrics-scraper - name: dashboard-metrics-scraper - namespace: kube-system -spec: - ports: - - port: 8000 - targetPort: 8000 - selector: - k8s-app: dashboard-metrics-scraper - ---- - -kind: Deployment -apiVersion: apps/v1 -metadata: - labels: - k8s-app: dashboard-metrics-scraper - name: dashboard-metrics-scraper - namespace: kube-system -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-app: dashboard-metrics-scraper - template: - 
metadata: - labels: - k8s-app: dashboard-metrics-scraper - annotations: - seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' - spec: - containers: - - name: dashboard-metrics-scraper - image: 10.250.0.110:8033/cmii/metrics-scraper:v1.0.4 - ports: - - containerPort: 8000 - protocol: TCP - livenessProbe: - httpGet: - scheme: HTTP - path: / - port: 8000 - initialDelaySeconds: 30 - timeoutSeconds: 30 - volumeMounts: - - mountPath: /tmp - name: tmp-volume - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 1001 - runAsGroup: 2001 - serviceAccountName: kubernetes-dashboard - # Comment the following tolerations if Dashboard must not be deployed on master - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - volumes: - - name: tmp-volume - emptyDir: { } ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: admin-user - namespace: kube-system - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: admin-user -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: - - kind: ServiceAccount - name: admin-user - namespace: kube-system ---- -apiVersion: v1 -kind: Namespace -metadata: - name: kube-system - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system - ---- - -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - kubernetes.io/cluster-service: "true" - name: kubernetes-dashboard - namespace: kube-system -spec: - ports: - - port: 443 - targetPort: 8443 - nodePort: 30554 - selector: - k8s-app: kubernetes-dashboard - type: NodePort - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-certs - namespace: kube-system -type: Opaque - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: 
kubernetes-dashboard - name: kubernetes-dashboard-csrf - namespace: kube-system -type: Opaque -data: - csrf: "" - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-key-holder - namespace: kube-system -type: Opaque - ---- - -kind: ConfigMap -apiVersion: v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-settings - namespace: kube-system - ---- - -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system -rules: - # Allow Dashboard to get, update and delete Dashboard exclusive secrets. - - apiGroups: [ "" ] - resources: [ "secrets" ] - resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf" ] - verbs: [ "get", "update", "delete" ] - # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. - - apiGroups: [ "" ] - resources: [ "configmaps" ] - resourceNames: [ "kubernetes-dashboard-settings" ] - verbs: [ "get", "update" ] - # Allow Dashboard to get metrics. 
- - apiGroups: [ "" ] - resources: [ "services" ] - resourceNames: [ "heapster", "dashboard-metrics-scraper" ] - verbs: [ "proxy" ] - - apiGroups: [ "" ] - resources: [ "services/proxy" ] - resourceNames: [ "heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper" ] - verbs: [ "get" ] - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard -rules: - # Allow Metrics Scraper to get metrics from the Metrics server - - apiGroups: [ "metrics.k8s.io" ] - resources: [ "pods", "nodes" ] - verbs: [ "get", "list", "watch" ] - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: kubernetes-dashboard -subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kube-system - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: kubernetes-dashboard -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kubernetes-dashboard -subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kube-system - ---- - -kind: Deployment -apiVersion: apps/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-app: kubernetes-dashboard - template: - metadata: - labels: - k8s-app: kubernetes-dashboard - spec: - containers: - - name: kubernetes-dashboard - image: 10.250.0.110:8033/cmii/dashboard:v2.0.1 - ports: - - containerPort: 8443 - protocol: TCP - args: - - --auto-generate-certificates - - --namespace=kube-system - # Uncomment the following line to manually specify Kubernetes API server Host - # If not specified, Dashboard will 
attempt to auto discover the API server and connect - # to it. Uncomment only if the default does not work. - # - --apiserver-host=http://my-address:port - volumeMounts: - - name: kubernetes-dashboard-certs - mountPath: /certs - # Create on-disk volume to store exec logs - - mountPath: /tmp - name: tmp-volume - livenessProbe: - httpGet: - scheme: HTTPS - path: / - port: 8443 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 1001 - runAsGroup: 2001 - volumes: - - name: kubernetes-dashboard-certs - secret: - secretName: kubernetes-dashboard-certs - - name: tmp-volume - emptyDir: { } - serviceAccountName: kubernetes-dashboard - # Comment the following tolerations if Dashboard must not be deployed on master - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - ---- - -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: dashboard-metrics-scraper - name: dashboard-metrics-scraper - namespace: kube-system -spec: - ports: - - port: 8000 - targetPort: 8000 - selector: - k8s-app: dashboard-metrics-scraper - ---- - -kind: Deployment -apiVersion: apps/v1 -metadata: - labels: - k8s-app: dashboard-metrics-scraper - name: dashboard-metrics-scraper - namespace: kube-system -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-app: dashboard-metrics-scraper - template: - metadata: - labels: - k8s-app: dashboard-metrics-scraper - annotations: - seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' - spec: - containers: - - name: dashboard-metrics-scraper - image: 10.250.0.110:8033/cmii/metrics-scraper:v1.0.4 - ports: - - containerPort: 8000 - protocol: TCP - livenessProbe: - httpGet: - scheme: HTTP - path: / - port: 8000 - initialDelaySeconds: 30 - timeoutSeconds: 30 - volumeMounts: - - mountPath: /tmp - name: tmp-volume - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 1001 - 
runAsGroup: 2001 - serviceAccountName: kubernetes-dashboard - # Comment the following tolerations if Dashboard must not be deployed on master - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - volumes: - - name: tmp-volume - emptyDir: { } ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: admin-user - namespace: kube-system - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: admin-user -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: - - kind: ServiceAccount - name: admin-user - namespace: kube-system ---- -apiVersion: v1 -kind: Namespace -metadata: - name: kube-system - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system - ---- - -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - kubernetes.io/cluster-service: "true" - name: kubernetes-dashboard - namespace: kube-system -spec: - ports: - - port: 443 - targetPort: 8443 - nodePort: 30554 - selector: - k8s-app: kubernetes-dashboard - type: NodePort - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-certs - namespace: kube-system -type: Opaque - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-csrf - namespace: kube-system -type: Opaque -data: - csrf: "" - ---- - -apiVersion: v1 -kind: Secret -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-key-holder - namespace: kube-system -type: Opaque - ---- - -kind: ConfigMap -apiVersion: v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard-settings - namespace: kube-system - ---- - -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard 
- namespace: kube-system -rules: - # Allow Dashboard to get, update and delete Dashboard exclusive secrets. - - apiGroups: [ "" ] - resources: [ "secrets" ] - resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf" ] - verbs: [ "get", "update", "delete" ] - # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. - - apiGroups: [ "" ] - resources: [ "configmaps" ] - resourceNames: [ "kubernetes-dashboard-settings" ] - verbs: [ "get", "update" ] - # Allow Dashboard to get metrics. - - apiGroups: [ "" ] - resources: [ "services" ] - resourceNames: [ "heapster", "dashboard-metrics-scraper" ] - verbs: [ "proxy" ] - - apiGroups: [ "" ] - resources: [ "services/proxy" ] - resourceNames: [ "heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper" ] - verbs: [ "get" ] - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard -rules: - # Allow Metrics Scraper to get metrics from the Metrics server - - apiGroups: [ "metrics.k8s.io" ] - resources: [ "pods", "nodes" ] - verbs: [ "get", "list", "watch" ] - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: kubernetes-dashboard -subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kube-system - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: kubernetes-dashboard -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kubernetes-dashboard -subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kube-system - ---- - -kind: Deployment -apiVersion: apps/v1 -metadata: - labels: - k8s-app: kubernetes-dashboard - 
name: kubernetes-dashboard - namespace: kube-system -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-app: kubernetes-dashboard - template: - metadata: - labels: - k8s-app: kubernetes-dashboard - spec: - containers: - - name: kubernetes-dashboard - image: 10.250.0.110:8033/cmii/dashboard:v2.0.1 - ports: - - containerPort: 8443 - protocol: TCP - args: - - --auto-generate-certificates - - --namespace=kube-system - # Uncomment the following line to manually specify Kubernetes API server Host - # If not specified, Dashboard will attempt to auto discover the API server and connect - # to it. Uncomment only if the default does not work. - # - --apiserver-host=http://my-address:port - volumeMounts: - - name: kubernetes-dashboard-certs - mountPath: /certs - # Create on-disk volume to store exec logs - - mountPath: /tmp - name: tmp-volume - livenessProbe: - httpGet: - scheme: HTTPS - path: / - port: 8443 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 1001 - runAsGroup: 2001 - volumes: - - name: kubernetes-dashboard-certs - secret: - secretName: kubernetes-dashboard-certs - - name: tmp-volume - emptyDir: { } - serviceAccountName: kubernetes-dashboard - # Comment the following tolerations if Dashboard must not be deployed on master - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - ---- - -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: dashboard-metrics-scraper - name: dashboard-metrics-scraper - namespace: kube-system -spec: - ports: - - port: 8000 - targetPort: 8000 - selector: - k8s-app: dashboard-metrics-scraper - ---- - -kind: Deployment -apiVersion: apps/v1 -metadata: - labels: - k8s-app: dashboard-metrics-scraper - name: dashboard-metrics-scraper - namespace: kube-system -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - k8s-app: dashboard-metrics-scraper - template: - 
metadata: - labels: - k8s-app: dashboard-metrics-scraper - annotations: - seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' - spec: - containers: - - name: dashboard-metrics-scraper - image: 10.250.0.110:8033/cmii/metrics-scraper:v1.0.4 - ports: - - containerPort: 8000 - protocol: TCP - livenessProbe: - httpGet: - scheme: HTTP - path: / - port: 8000 - initialDelaySeconds: 30 - timeoutSeconds: 30 - volumeMounts: - - mountPath: /tmp - name: tmp-volume - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 1001 - runAsGroup: 2001 - serviceAccountName: kubernetes-dashboard - # Comment the following tolerations if Dashboard must not be deployed on master - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - volumes: - - name: tmp-volume - emptyDir: { } ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: admin-user - namespace: kube-system - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: admin-user -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: - - kind: ServiceAccount - name: admin-user - namespace: kube-system diff --git a/agent-operator/deploy/z_bjtg/k8s-emqx.yaml b/agent-operator/deploy/z_bjtg/k8s-emqx.yaml deleted file mode 100755 index 2ec8197..0000000 --- a/agent-operator/deploy/z_bjtg/k8s-emqx.yaml +++ /dev/null @@ -1,1578 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-emqxs - namespace: bjtg ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-emqxs-env - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -data: - EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc.cluster.local:443 - EMQX_NAME: helm-emqxs - EMQX_CLUSTER__DISCOVERY: k8s - EMQX_CLUSTER__K8S__APP_NAME: 
helm-emqxs - EMQX_CLUSTER__K8S__SERVICE_NAME: helm-emqxs-headless - EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns" - EMQX_CLUSTER__K8S__namespace: bjtg - EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local - EMQX_ALLOW_ANONYMOUS: "false" - EMQX_ACL_NOMATCH: "deny" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-emqxs-cm - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -data: - emqx_auth_username.conf: |- - auth.user.1.username = cmlc - auth.user.1.password = odD8#Ve7.B - auth.user.password_hash = sha256 - - acl.conf: |- - {allow, {user, "admin"}, pubsub, ["admin/#"]}. - {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}. - {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}. - {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}. - {allow, all}. - - loaded_plugins: |- - {emqx_auth_username,true}. - {emqx_management, true}. - {emqx_recon, true}. - {emqx_retainer, false}. - {emqx_dashboard, true}. - {emqx_telemetry, true}. - {emqx_rule_engine, true}. - {emqx_bridge_mqtt, false}. 
---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - replicas: 1 - serviceName: helm-emqxs-headless - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - template: - metadata: - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - spec: - affinity: { } - serviceAccountName: helm-emqxs - containers: - - name: helm-emqxs - image: 10.250.0.110:8033/cmii/emqx:5.5.1 - imagePullPolicy: Always - ports: - - name: mqtt - containerPort: 1883 - - name: mqttssl - containerPort: 8883 - - name: mgmt - containerPort: 8081 - - name: ws - containerPort: 8083 - - name: wss - containerPort: 8084 - - name: dashboard - containerPort: 18083 - - name: ekka - containerPort: 4370 - envFrom: - - configMapRef: - name: helm-emqxs-env - resources: { } - volumeMounts: - - name: emqx-data - mountPath: "/opt/emqx/data/mnesia" - readOnly: false - - name: helm-emqxs-cm - mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf" - subPath: emqx_auth_username.conf - readOnly: false - # - name: helm-emqxs-cm - # mountPath: "/opt/emqx/etc/acl.conf" - # subPath: "acl.conf" - # readOnly: false - - name: helm-emqxs-cm - mountPath: "/opt/emqx/data/loaded_plugins" - subPath: loaded_plugins - readOnly: false - volumes: - - name: emqx-data - persistentVolumeClaim: - claimName: helm-emqxs - - name: helm-emqxs-cm - configMap: - name: helm-emqxs-cm - items: - - key: emqx_auth_username.conf - path: emqx_auth_username.conf - - key: acl.conf - path: acl.conf - - key: loaded_plugins - path: loaded_plugins ---- -kind: Role 
-apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-emqxs - namespace: bjtg -rules: - - apiGroups: - - "" - resources: - - endpoints - verbs: - - get - - watch - - list ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-emqxs - namespace: bjtg -subjects: - - kind: ServiceAccount - name: helm-emqxs - namespace: bjtg -roleRef: - kind: Role - name: helm-emqxs - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: NodePort - selector: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - ports: - - port: 1883 - name: mqtt - targetPort: 1883 - nodePort: 31883 - - port: 18083 - name: dashboard - targetPort: 18083 - nodePort: 38085 - - port: 8083 - name: mqtt-websocket - targetPort: 8083 - nodePort: 38083 ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-emqxs-headless - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - clusterIP: None - selector: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - ports: - - name: mqtt - port: 1883 - protocol: TCP - targetPort: 1883 - - name: mqttssl - port: 8883 - protocol: TCP - targetPort: 8883 - - name: mgmt - port: 8081 - protocol: TCP - targetPort: 8081 - - name: websocket - port: 8083 - protocol: TCP - targetPort: 8083 - - name: wss - port: 8084 - protocol: TCP - targetPort: 8084 - - name: dashboard - port: 18083 - protocol: TCP - targetPort: 18083 - - name: ekka - port: 4370 - protocol: TCP - targetPort: 4370 ---- -apiVersion: v1 -kind: 
ServiceAccount -metadata: - name: helm-emqxs - namespace: bjtg ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-emqxs-env - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -data: - EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc.cluster.local:443 - EMQX_NAME: helm-emqxs - EMQX_CLUSTER__DISCOVERY: k8s - EMQX_CLUSTER__K8S__APP_NAME: helm-emqxs - EMQX_CLUSTER__K8S__SERVICE_NAME: helm-emqxs-headless - EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns" - EMQX_CLUSTER__K8S__namespace: bjtg - EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local - EMQX_ALLOW_ANONYMOUS: "false" - EMQX_ACL_NOMATCH: "deny" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-emqxs-cm - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -data: - emqx_auth_username.conf: |- - auth.user.1.username = cmlc - auth.user.1.password = odD8#Ve7.B - auth.user.password_hash = sha256 - - acl.conf: |- - {allow, {user, "admin"}, pubsub, ["admin/#"]}. - {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}. - {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}. - {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}. - {allow, all}. - - loaded_plugins: |- - {emqx_auth_username,true}. - {emqx_management, true}. - {emqx_recon, true}. - {emqx_retainer, false}. - {emqx_dashboard, true}. - {emqx_telemetry, true}. - {emqx_rule_engine, true}. - {emqx_bridge_mqtt, false}. 
---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - replicas: 1 - serviceName: helm-emqxs-headless - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - template: - metadata: - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - spec: - affinity: { } - serviceAccountName: helm-emqxs - containers: - - name: helm-emqxs - image: 10.250.0.110:8033/cmii/emqx:5.5.1 - imagePullPolicy: Always - ports: - - name: mqtt - containerPort: 1883 - - name: mqttssl - containerPort: 8883 - - name: mgmt - containerPort: 8081 - - name: ws - containerPort: 8083 - - name: wss - containerPort: 8084 - - name: dashboard - containerPort: 18083 - - name: ekka - containerPort: 4370 - envFrom: - - configMapRef: - name: helm-emqxs-env - resources: { } - volumeMounts: - - name: emqx-data - mountPath: "/opt/emqx/data/mnesia" - readOnly: false - - name: helm-emqxs-cm - mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf" - subPath: emqx_auth_username.conf - readOnly: false - # - name: helm-emqxs-cm - # mountPath: "/opt/emqx/etc/acl.conf" - # subPath: "acl.conf" - # readOnly: false - - name: helm-emqxs-cm - mountPath: "/opt/emqx/data/loaded_plugins" - subPath: loaded_plugins - readOnly: false - volumes: - - name: emqx-data - persistentVolumeClaim: - claimName: helm-emqxs - - name: helm-emqxs-cm - configMap: - name: helm-emqxs-cm - items: - - key: emqx_auth_username.conf - path: emqx_auth_username.conf - - key: acl.conf - path: acl.conf - - key: loaded_plugins - path: loaded_plugins ---- -kind: Role 
-apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-emqxs - namespace: bjtg -rules: - - apiGroups: - - "" - resources: - - endpoints - verbs: - - get - - watch - - list ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-emqxs - namespace: bjtg -subjects: - - kind: ServiceAccount - name: helm-emqxs - namespace: bjtg -roleRef: - kind: Role - name: helm-emqxs - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: NodePort - selector: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - ports: - - port: 1883 - name: mqtt - targetPort: 1883 - nodePort: 31883 - - port: 18083 - name: dashboard - targetPort: 18083 - nodePort: 38085 - - port: 8083 - name: mqtt-websocket - targetPort: 8083 - nodePort: 38083 ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-emqxs-headless - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - clusterIP: None - selector: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - ports: - - name: mqtt - port: 1883 - protocol: TCP - targetPort: 1883 - - name: mqttssl - port: 8883 - protocol: TCP - targetPort: 8883 - - name: mgmt - port: 8081 - protocol: TCP - targetPort: 8081 - - name: websocket - port: 8083 - protocol: TCP - targetPort: 8083 - - name: wss - port: 8084 - protocol: TCP - targetPort: 8084 - - name: dashboard - port: 18083 - protocol: TCP - targetPort: 18083 - - name: ekka - port: 4370 - protocol: TCP - targetPort: 4370 ---- -apiVersion: v1 -kind: 
ServiceAccount -metadata: - name: helm-emqxs - namespace: bjtg ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-emqxs-env - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -data: - EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc.cluster.local:443 - EMQX_NAME: helm-emqxs - EMQX_CLUSTER__DISCOVERY: k8s - EMQX_CLUSTER__K8S__APP_NAME: helm-emqxs - EMQX_CLUSTER__K8S__SERVICE_NAME: helm-emqxs-headless - EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns" - EMQX_CLUSTER__K8S__namespace: bjtg - EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local - EMQX_ALLOW_ANONYMOUS: "false" - EMQX_ACL_NOMATCH: "deny" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-emqxs-cm - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -data: - emqx_auth_username.conf: |- - auth.user.1.username = cmlc - auth.user.1.password = odD8#Ve7.B - auth.user.password_hash = sha256 - - acl.conf: |- - {allow, {user, "admin"}, pubsub, ["admin/#"]}. - {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}. - {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}. - {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}. - {allow, all}. - - loaded_plugins: |- - {emqx_auth_username,true}. - {emqx_management, true}. - {emqx_recon, true}. - {emqx_retainer, false}. - {emqx_dashboard, true}. - {emqx_telemetry, true}. - {emqx_rule_engine, true}. - {emqx_bridge_mqtt, false}. 
---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - replicas: 1 - serviceName: helm-emqxs-headless - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - template: - metadata: - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - spec: - affinity: { } - serviceAccountName: helm-emqxs - containers: - - name: helm-emqxs - image: 10.250.0.110:8033/cmii/emqx:5.5.1 - imagePullPolicy: Always - ports: - - name: mqtt - containerPort: 1883 - - name: mqttssl - containerPort: 8883 - - name: mgmt - containerPort: 8081 - - name: ws - containerPort: 8083 - - name: wss - containerPort: 8084 - - name: dashboard - containerPort: 18083 - - name: ekka - containerPort: 4370 - envFrom: - - configMapRef: - name: helm-emqxs-env - resources: { } - volumeMounts: - - name: emqx-data - mountPath: "/opt/emqx/data/mnesia" - readOnly: false - - name: helm-emqxs-cm - mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf" - subPath: emqx_auth_username.conf - readOnly: false - # - name: helm-emqxs-cm - # mountPath: "/opt/emqx/etc/acl.conf" - # subPath: "acl.conf" - # readOnly: false - - name: helm-emqxs-cm - mountPath: "/opt/emqx/data/loaded_plugins" - subPath: loaded_plugins - readOnly: false - volumes: - - name: emqx-data - persistentVolumeClaim: - claimName: helm-emqxs - - name: helm-emqxs-cm - configMap: - name: helm-emqxs-cm - items: - - key: emqx_auth_username.conf - path: emqx_auth_username.conf - - key: acl.conf - path: acl.conf - - key: loaded_plugins - path: loaded_plugins ---- -kind: Role 
-apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-emqxs - namespace: bjtg -rules: - - apiGroups: - - "" - resources: - - endpoints - verbs: - - get - - watch - - list ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-emqxs - namespace: bjtg -subjects: - - kind: ServiceAccount - name: helm-emqxs - namespace: bjtg -roleRef: - kind: Role - name: helm-emqxs - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: NodePort - selector: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - ports: - - port: 1883 - name: mqtt - targetPort: 1883 - nodePort: 31883 - - port: 18083 - name: dashboard - targetPort: 18083 - nodePort: 38085 - - port: 8083 - name: mqtt-websocket - targetPort: 8083 - nodePort: 38083 ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-emqxs-headless - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - clusterIP: None - selector: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - ports: - - name: mqtt - port: 1883 - protocol: TCP - targetPort: 1883 - - name: mqttssl - port: 8883 - protocol: TCP - targetPort: 8883 - - name: mgmt - port: 8081 - protocol: TCP - targetPort: 8081 - - name: websocket - port: 8083 - protocol: TCP - targetPort: 8083 - - name: wss - port: 8084 - protocol: TCP - targetPort: 8084 - - name: dashboard - port: 18083 - protocol: TCP - targetPort: 18083 - - name: ekka - port: 4370 - protocol: TCP - targetPort: 4370 ---- -apiVersion: v1 -kind: 
ServiceAccount -metadata: - name: helm-emqxs - namespace: bjtg ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-emqxs-env - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -data: - EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc.cluster.local:443 - EMQX_NAME: helm-emqxs - EMQX_CLUSTER__DISCOVERY: k8s - EMQX_CLUSTER__K8S__APP_NAME: helm-emqxs - EMQX_CLUSTER__K8S__SERVICE_NAME: helm-emqxs-headless - EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns" - EMQX_CLUSTER__K8S__namespace: bjtg - EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local - EMQX_ALLOW_ANONYMOUS: "false" - EMQX_ACL_NOMATCH: "deny" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-emqxs-cm - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -data: - emqx_auth_username.conf: |- - auth.user.1.username = cmlc - auth.user.1.password = odD8#Ve7.B - auth.user.password_hash = sha256 - - acl.conf: |- - {allow, {user, "admin"}, pubsub, ["admin/#"]}. - {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}. - {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}. - {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}. - {allow, all}. - - loaded_plugins: |- - {emqx_auth_username,true}. - {emqx_management, true}. - {emqx_recon, true}. - {emqx_retainer, false}. - {emqx_dashboard, true}. - {emqx_telemetry, true}. - {emqx_rule_engine, true}. - {emqx_bridge_mqtt, false}. 
---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - replicas: 1 - serviceName: helm-emqxs-headless - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - template: - metadata: - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - spec: - affinity: { } - serviceAccountName: helm-emqxs - containers: - - name: helm-emqxs - image: 10.250.0.110:8033/cmii/emqx:5.5.1 - imagePullPolicy: Always - ports: - - name: mqtt - containerPort: 1883 - - name: mqttssl - containerPort: 8883 - - name: mgmt - containerPort: 8081 - - name: ws - containerPort: 8083 - - name: wss - containerPort: 8084 - - name: dashboard - containerPort: 18083 - - name: ekka - containerPort: 4370 - envFrom: - - configMapRef: - name: helm-emqxs-env - resources: { } - volumeMounts: - - name: emqx-data - mountPath: "/opt/emqx/data/mnesia" - readOnly: false - - name: helm-emqxs-cm - mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf" - subPath: emqx_auth_username.conf - readOnly: false - # - name: helm-emqxs-cm - # mountPath: "/opt/emqx/etc/acl.conf" - # subPath: "acl.conf" - # readOnly: false - - name: helm-emqxs-cm - mountPath: "/opt/emqx/data/loaded_plugins" - subPath: loaded_plugins - readOnly: false - volumes: - - name: emqx-data - persistentVolumeClaim: - claimName: helm-emqxs - - name: helm-emqxs-cm - configMap: - name: helm-emqxs-cm - items: - - key: emqx_auth_username.conf - path: emqx_auth_username.conf - - key: acl.conf - path: acl.conf - - key: loaded_plugins - path: loaded_plugins ---- -kind: Role 
-apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-emqxs - namespace: bjtg -rules: - - apiGroups: - - "" - resources: - - endpoints - verbs: - - get - - watch - - list ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-emqxs - namespace: bjtg -subjects: - - kind: ServiceAccount - name: helm-emqxs - namespace: bjtg -roleRef: - kind: Role - name: helm-emqxs - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: NodePort - selector: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - ports: - - port: 1883 - name: mqtt - targetPort: 1883 - nodePort: 31883 - - port: 18083 - name: dashboard - targetPort: 18083 - nodePort: 38085 - - port: 8083 - name: mqtt-websocket - targetPort: 8083 - nodePort: 38083 ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-emqxs-headless - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - clusterIP: None - selector: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - ports: - - name: mqtt - port: 1883 - protocol: TCP - targetPort: 1883 - - name: mqttssl - port: 8883 - protocol: TCP - targetPort: 8883 - - name: mgmt - port: 8081 - protocol: TCP - targetPort: 8081 - - name: websocket - port: 8083 - protocol: TCP - targetPort: 8083 - - name: wss - port: 8084 - protocol: TCP - targetPort: 8084 - - name: dashboard - port: 18083 - protocol: TCP - targetPort: 18083 - - name: ekka - port: 4370 - protocol: TCP - targetPort: 4370 ---- -apiVersion: v1 -kind: 
ServiceAccount -metadata: - name: helm-emqxs - namespace: bjtg ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-emqxs-env - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -data: - EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc.cluster.local:443 - EMQX_NAME: helm-emqxs - EMQX_CLUSTER__DISCOVERY: k8s - EMQX_CLUSTER__K8S__APP_NAME: helm-emqxs - EMQX_CLUSTER__K8S__SERVICE_NAME: helm-emqxs-headless - EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns" - EMQX_CLUSTER__K8S__namespace: bjtg - EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local - EMQX_ALLOW_ANONYMOUS: "false" - EMQX_ACL_NOMATCH: "deny" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-emqxs-cm - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -data: - emqx_auth_username.conf: |- - auth.user.1.username = cmlc - auth.user.1.password = odD8#Ve7.B - auth.user.password_hash = sha256 - - acl.conf: |- - {allow, {user, "admin"}, pubsub, ["admin/#"]}. - {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}. - {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}. - {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}. - {allow, all}. - - loaded_plugins: |- - {emqx_auth_username,true}. - {emqx_management, true}. - {emqx_recon, true}. - {emqx_retainer, false}. - {emqx_dashboard, true}. - {emqx_telemetry, true}. - {emqx_rule_engine, true}. - {emqx_bridge_mqtt, false}. 
---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - replicas: 1 - serviceName: helm-emqxs-headless - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - template: - metadata: - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - spec: - affinity: { } - serviceAccountName: helm-emqxs - containers: - - name: helm-emqxs - image: 10.250.0.110:8033/cmii/emqx:5.5.1 - imagePullPolicy: Always - ports: - - name: mqtt - containerPort: 1883 - - name: mqttssl - containerPort: 8883 - - name: mgmt - containerPort: 8081 - - name: ws - containerPort: 8083 - - name: wss - containerPort: 8084 - - name: dashboard - containerPort: 18083 - - name: ekka - containerPort: 4370 - envFrom: - - configMapRef: - name: helm-emqxs-env - resources: { } - volumeMounts: - - name: emqx-data - mountPath: "/opt/emqx/data/mnesia" - readOnly: false - - name: helm-emqxs-cm - mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf" - subPath: emqx_auth_username.conf - readOnly: false - # - name: helm-emqxs-cm - # mountPath: "/opt/emqx/etc/acl.conf" - # subPath: "acl.conf" - # readOnly: false - - name: helm-emqxs-cm - mountPath: "/opt/emqx/data/loaded_plugins" - subPath: loaded_plugins - readOnly: false - volumes: - - name: emqx-data - persistentVolumeClaim: - claimName: helm-emqxs - - name: helm-emqxs-cm - configMap: - name: helm-emqxs-cm - items: - - key: emqx_auth_username.conf - path: emqx_auth_username.conf - - key: acl.conf - path: acl.conf - - key: loaded_plugins - path: loaded_plugins ---- -kind: Role 
-apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-emqxs - namespace: bjtg -rules: - - apiGroups: - - "" - resources: - - endpoints - verbs: - - get - - watch - - list ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-emqxs - namespace: bjtg -subjects: - - kind: ServiceAccount - name: helm-emqxs - namespace: bjtg -roleRef: - kind: Role - name: helm-emqxs - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: NodePort - selector: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - ports: - - port: 1883 - name: mqtt - targetPort: 1883 - nodePort: 31883 - - port: 18083 - name: dashboard - targetPort: 18083 - nodePort: 38085 - - port: 8083 - name: mqtt-websocket - targetPort: 8083 - nodePort: 38083 ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-emqxs-headless - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - clusterIP: None - selector: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - ports: - - name: mqtt - port: 1883 - protocol: TCP - targetPort: 1883 - - name: mqttssl - port: 8883 - protocol: TCP - targetPort: 8883 - - name: mgmt - port: 8081 - protocol: TCP - targetPort: 8081 - - name: websocket - port: 8083 - protocol: TCP - targetPort: 8083 - - name: wss - port: 8084 - protocol: TCP - targetPort: 8084 - - name: dashboard - port: 18083 - protocol: TCP - targetPort: 18083 - - name: ekka - port: 4370 - protocol: TCP - targetPort: 4370 ---- -apiVersion: v1 -kind: 
ServiceAccount -metadata: - name: helm-emqxs - namespace: bjtg ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-emqxs-env - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -data: - EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc.cluster.local:443 - EMQX_NAME: helm-emqxs - EMQX_CLUSTER__DISCOVERY: k8s - EMQX_CLUSTER__K8S__APP_NAME: helm-emqxs - EMQX_CLUSTER__K8S__SERVICE_NAME: helm-emqxs-headless - EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns" - EMQX_CLUSTER__K8S__namespace: bjtg - EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local - EMQX_ALLOW_ANONYMOUS: "false" - EMQX_ACL_NOMATCH: "deny" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-emqxs-cm - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -data: - emqx_auth_username.conf: |- - auth.user.1.username = cmlc - auth.user.1.password = odD8#Ve7.B - auth.user.password_hash = sha256 - - acl.conf: |- - {allow, {user, "admin"}, pubsub, ["admin/#"]}. - {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}. - {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}. - {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}. - {allow, all}. - - loaded_plugins: |- - {emqx_auth_username,true}. - {emqx_management, true}. - {emqx_recon, true}. - {emqx_retainer, false}. - {emqx_dashboard, true}. - {emqx_telemetry, true}. - {emqx_rule_engine, true}. - {emqx_bridge_mqtt, false}. 
---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - replicas: 1 - serviceName: helm-emqxs-headless - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - template: - metadata: - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - spec: - affinity: { } - serviceAccountName: helm-emqxs - containers: - - name: helm-emqxs - image: 10.250.0.110:8033/cmii/emqx:5.5.1 - imagePullPolicy: Always - ports: - - name: mqtt - containerPort: 1883 - - name: mqttssl - containerPort: 8883 - - name: mgmt - containerPort: 8081 - - name: ws - containerPort: 8083 - - name: wss - containerPort: 8084 - - name: dashboard - containerPort: 18083 - - name: ekka - containerPort: 4370 - envFrom: - - configMapRef: - name: helm-emqxs-env - resources: { } - volumeMounts: - - name: emqx-data - mountPath: "/opt/emqx/data/mnesia" - readOnly: false - - name: helm-emqxs-cm - mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf" - subPath: emqx_auth_username.conf - readOnly: false - # - name: helm-emqxs-cm - # mountPath: "/opt/emqx/etc/acl.conf" - # subPath: "acl.conf" - # readOnly: false - - name: helm-emqxs-cm - mountPath: "/opt/emqx/data/loaded_plugins" - subPath: loaded_plugins - readOnly: false - volumes: - - name: emqx-data - persistentVolumeClaim: - claimName: helm-emqxs - - name: helm-emqxs-cm - configMap: - name: helm-emqxs-cm - items: - - key: emqx_auth_username.conf - path: emqx_auth_username.conf - - key: acl.conf - path: acl.conf - - key: loaded_plugins - path: loaded_plugins ---- -kind: Role 
-apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-emqxs - namespace: bjtg -rules: - - apiGroups: - - "" - resources: - - endpoints - verbs: - - get - - watch - - list ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-emqxs - namespace: bjtg -subjects: - - kind: ServiceAccount - name: helm-emqxs - namespace: bjtg -roleRef: - kind: Role - name: helm-emqxs - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: NodePort - selector: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - ports: - - port: 1883 - name: mqtt - targetPort: 1883 - nodePort: 31883 - - port: 18083 - name: dashboard - targetPort: 18083 - nodePort: 38085 - - port: 8083 - name: mqtt-websocket - targetPort: 8083 - nodePort: 38083 ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-emqxs-headless - namespace: bjtg - labels: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - helm.sh/chart: emqx-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - clusterIP: None - selector: - cmii.type: middleware - cmii.app: helm-emqxs - cmii.emqx.architecture: cluster - ports: - - name: mqtt - port: 1883 - protocol: TCP - targetPort: 1883 - - name: mqttssl - port: 8883 - protocol: TCP - targetPort: 8883 - - name: mgmt - port: 8081 - protocol: TCP - targetPort: 8081 - - name: websocket - port: 8083 - protocol: TCP - targetPort: 8083 - - name: wss - port: 8084 - protocol: TCP - targetPort: 8084 - - name: dashboard - port: 18083 - protocol: TCP - targetPort: 18083 - - name: ekka - port: 4370 - protocol: TCP - targetPort: 4370 diff --git 
a/agent-operator/deploy/z_bjtg/k8s-ingress.yaml b/agent-operator/deploy/z_bjtg/k8s-ingress.yaml deleted file mode 100755 index d58c556..0000000 --- a/agent-operator/deploy/z_bjtg/k8s-ingress.yaml +++ /dev/null @@ -1,3264 +0,0 @@ ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: frontend-applications-ingress - namespace: bjtg - labels: - type: frontend - octopus.control: all-ingress-config-wdd - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/configuration-snippet: | - rewrite ^(/green)$ $1/ redirect; - rewrite ^(/supervision)$ $1/ redirect; - rewrite ^(/inspection)$ $1/ redirect; - rewrite ^(/park)$ $1/ redirect; - rewrite ^(/pangu)$ $1/ redirect; - rewrite ^(/ai-brain)$ $1/ redirect; - rewrite ^(/base)$ $1/ redirect; - rewrite ^(/cms)$ $1/ redirect; - rewrite ^(/cmsportal)$ $1/ redirect; - rewrite ^(/detection)$ $1/ redirect; - rewrite ^(/emergency)$ $1/ redirect; - rewrite ^(/hyper)$ $1/ redirect; - rewrite ^(/logistics)$ $1/ redirect; - rewrite ^(/mws)$ $1/ redirect; - rewrite ^(/mws-admin)$ $1/ redirect; - rewrite ^(/oms)$ $1/ redirect; - rewrite ^(/open)$ $1/ redirect; - rewrite ^(/security)$ $1/ redirect; - rewrite ^(/share)$ $1/ redirect; - rewrite ^(/splice)$ $1/ redirect; - rewrite ^(/splice-visual)$ $1/ redirect; - rewrite ^(/traffic)$ $1/ redirect; - rewrite ^(/visualization)$ $1/ redirect; - rewrite ^(/communication)$ $1/ redirect; - rewrite ^(/infrastructure)$ $1/ redirect; - rewrite ^(/media)$ $1/ redirect; - rewrite ^(/seniclive)$ $1/ redirect; -spec: - rules: - - host: fake-domain.bjtg.io - http: - paths: - - path: /inspection/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /supervision/?(.*) - pathType: ImplementationSpecific - backend: - 
serviceName: cmii-suav-platform-supervision - servicePort: 9528 - - path: /supervisionh5/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-platform-supervisionh5 - servicePort: 9528 - - path: /green/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /park/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /pangu/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /emersupport/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /infrastructure/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms-portal - servicePort: 9528 - - path: /ai-brain/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-ai-brain - servicePort: 9528 - - path: /base/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-base - servicePort: 9528 - - path: /cms/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms - servicePort: 9528 - - path: /cmsportal/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms-portal - servicePort: 9528 - - path: /detection/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-detection - servicePort: 9528 - - path: /emergency/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-emergency-rescue - servicePort: 9528 - - path: /hyper/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-hyperspectral - servicePort: 9528 - - path: /logistics/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: 
cmii-uav-platform-logistics - servicePort: 9528 - - path: /mws/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-mws - servicePort: 9528 - - path: /mws-admin/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-mws-admin - servicePort: 9528 - - path: /oms/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-oms - servicePort: 9528 - - path: /open/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-open - servicePort: 9528 - - path: /security/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /share/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-share - servicePort: 9528 - - path: /splice/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-splice - servicePort: 9528 - - path: /splice-visual/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-splice-visual - servicePort: 9528 - - path: /traffic/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /visualization/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-visualization - servicePort: 9528 - - path: /communication/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /media/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-media - servicePort: 9528 - - path: /seniclive/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-seniclive - servicePort: 9528 - - path: /jiangsuwenlv/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-jiangsuwenlv - servicePort: 9528 - - path: /qinghaitourism/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: 
cmii-uav-platform-qinghaitourism - servicePort: 9528 - - path: /securityh5/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-securityh5 - servicePort: 9528 - - path: /fireRescue/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: backend-applications-ingress - namespace: bjtg - labels: - type: backend - octopus.control: all-ingress-config-wdd - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" -spec: - rules: - - host: cmii-admin-data.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-data - servicePort: 8080 - - host: cmii-admin-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-gateway - servicePort: 8080 - - host: cmii-admin-user.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-user - servicePort: 8080 - - host: cmii-open-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-open-gateway - servicePort: 8080 - - host: cmii-uav-airspace.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-airspace - servicePort: 8080 - - host: cmii-uav-brain.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-brain - servicePort: 8080 - - host: cmii-uav-clusters.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-clusters - servicePort: 8080 - - host: cmii-uav-cms.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: 
ImplementationSpecific - backend: - serviceName: cmii-uav-cms - servicePort: 8080 - - host: cmii-uav-data-post-process.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-data-post-process - servicePort: 8080 - - host: cmii-uav-developer.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-developer - servicePort: 8080 - - host: cmii-uav-device.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-device - servicePort: 8080 - - host: cmii-uav-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gateway - servicePort: 8080 - - host: cmii-uav-industrial-portfolio.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-industrial-portfolio - servicePort: 8080 - - host: cmii-uav-kpi-monitor.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-kpi-monitor - servicePort: 8080 - - host: cmii-uav-live.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-live - servicePort: 8080 - - host: cmii-uav-cloud-live.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-cloud-live - servicePort: 8080 - - host: cmii-uav-logger.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-logger - servicePort: 8080 - - host: cmii-uav-material-warehouse.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-material-warehouse - servicePort: 8080 - - host: cmii-uav-mission.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - 
serviceName: cmii-uav-mission - servicePort: 8080 - - host: cmii-uav-monitor.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-monitor - servicePort: 8080 - - host: cmii-uav-mqtthandler.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-mqtthandler - servicePort: 8080 - - host: cmii-uav-notice.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-notice - servicePort: 8080 - - host: cmii-uav-oauth.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-oauth - servicePort: 8080 - - host: cmii-uav-process.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-process - servicePort: 8080 - - host: cmii-uav-security-system.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-security-system - servicePort: 8080 - - host: cmii-uav-surveillance.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-surveillance - servicePort: 8080 - - host: cmii-uav-user.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-user - servicePort: 8080 - - host: cmii-uav-waypoint.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-waypoint - servicePort: 8080 - - host: cmii-uav-alarm.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-alarm - servicePort: 8080 - - host: cmii-uav-emergency.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-emergency - servicePort: 8080 - - host: 
cmii-uav-integration.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-integration - servicePort: 8080 - - host: cmii-suav-supervision.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-supervision - servicePort: 8080 - - host: cmii-uav-gis-server.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gis-server - servicePort: 8080 - - host: cmii-uav-grid-datasource.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-grid-datasource - servicePort: 8080 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: all-gateways-ingress - namespace: bjtg - labels: - type: api-gateway - octopus.control: all-ingress-config-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/configuration-snippet: | - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; -spec: - rules: - - host: fake-domain.bjtg.io - http: - paths: - - path: /oms/api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-gateway - servicePort: 8080 - - path: /open/api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-open-gateway - servicePort: 8080 - - path: /api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gateway - servicePort: 8080 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: frontend-applications-ingress - namespace: bjtg - labels: - type: frontend - octopus.control: all-ingress-config-wdd - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - 
annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/configuration-snippet: | - rewrite ^(/green)$ $1/ redirect; - rewrite ^(/supervision)$ $1/ redirect; - rewrite ^(/inspection)$ $1/ redirect; - rewrite ^(/park)$ $1/ redirect; - rewrite ^(/pangu)$ $1/ redirect; - rewrite ^(/ai-brain)$ $1/ redirect; - rewrite ^(/base)$ $1/ redirect; - rewrite ^(/cms)$ $1/ redirect; - rewrite ^(/cmsportal)$ $1/ redirect; - rewrite ^(/detection)$ $1/ redirect; - rewrite ^(/emergency)$ $1/ redirect; - rewrite ^(/hyper)$ $1/ redirect; - rewrite ^(/logistics)$ $1/ redirect; - rewrite ^(/mws)$ $1/ redirect; - rewrite ^(/mws-admin)$ $1/ redirect; - rewrite ^(/oms)$ $1/ redirect; - rewrite ^(/open)$ $1/ redirect; - rewrite ^(/security)$ $1/ redirect; - rewrite ^(/share)$ $1/ redirect; - rewrite ^(/splice)$ $1/ redirect; - rewrite ^(/splice-visual)$ $1/ redirect; - rewrite ^(/traffic)$ $1/ redirect; - rewrite ^(/visualization)$ $1/ redirect; - rewrite ^(/communication)$ $1/ redirect; - rewrite ^(/infrastructure)$ $1/ redirect; - rewrite ^(/media)$ $1/ redirect; - rewrite ^(/seniclive)$ $1/ redirect; -spec: - rules: - - host: fake-domain.bjtg.io - http: - paths: - - path: /inspection/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /supervision/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-platform-supervision - servicePort: 9528 - - path: /supervisionh5/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-platform-supervisionh5 - servicePort: 9528 - - path: /green/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /park/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /pangu/?(.*) - pathType: ImplementationSpecific - 
backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /emersupport/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /infrastructure/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms-portal - servicePort: 9528 - - path: /ai-brain/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-ai-brain - servicePort: 9528 - - path: /base/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-base - servicePort: 9528 - - path: /cms/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms - servicePort: 9528 - - path: /cmsportal/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms-portal - servicePort: 9528 - - path: /detection/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-detection - servicePort: 9528 - - path: /emergency/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-emergency-rescue - servicePort: 9528 - - path: /hyper/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-hyperspectral - servicePort: 9528 - - path: /logistics/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-logistics - servicePort: 9528 - - path: /mws/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-mws - servicePort: 9528 - - path: /mws-admin/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-mws-admin - servicePort: 9528 - - path: /oms/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-oms - servicePort: 9528 - - path: /open/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: 
cmii-uav-platform-open - servicePort: 9528 - - path: /security/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /share/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-share - servicePort: 9528 - - path: /splice/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-splice - servicePort: 9528 - - path: /splice-visual/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-splice-visual - servicePort: 9528 - - path: /traffic/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /visualization/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-visualization - servicePort: 9528 - - path: /communication/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /media/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-media - servicePort: 9528 - - path: /seniclive/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-seniclive - servicePort: 9528 - - path: /jiangsuwenlv/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-jiangsuwenlv - servicePort: 9528 - - path: /qinghaitourism/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-qinghaitourism - servicePort: 9528 - - path: /securityh5/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-securityh5 - servicePort: 9528 - - path: /fireRescue/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: backend-applications-ingress - namespace: bjtg - labels: - type: backend - octopus.control: all-ingress-config-wdd - 
app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" -spec: - rules: - - host: cmii-admin-data.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-data - servicePort: 8080 - - host: cmii-admin-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-gateway - servicePort: 8080 - - host: cmii-admin-user.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-user - servicePort: 8080 - - host: cmii-open-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-open-gateway - servicePort: 8080 - - host: cmii-uav-airspace.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-airspace - servicePort: 8080 - - host: cmii-uav-brain.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-brain - servicePort: 8080 - - host: cmii-uav-clusters.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-clusters - servicePort: 8080 - - host: cmii-uav-cms.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-cms - servicePort: 8080 - - host: cmii-uav-data-post-process.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-data-post-process - servicePort: 8080 - - host: cmii-uav-developer.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-developer - servicePort: 8080 - - host: cmii-uav-device.uavcloud-bjtg.io - http: - paths: - - path: 
/ - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-device - servicePort: 8080 - - host: cmii-uav-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gateway - servicePort: 8080 - - host: cmii-uav-industrial-portfolio.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-industrial-portfolio - servicePort: 8080 - - host: cmii-uav-kpi-monitor.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-kpi-monitor - servicePort: 8080 - - host: cmii-uav-live.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-live - servicePort: 8080 - - host: cmii-uav-cloud-live.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-cloud-live - servicePort: 8080 - - host: cmii-uav-logger.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-logger - servicePort: 8080 - - host: cmii-uav-material-warehouse.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-material-warehouse - servicePort: 8080 - - host: cmii-uav-mission.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-mission - servicePort: 8080 - - host: cmii-uav-monitor.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-monitor - servicePort: 8080 - - host: cmii-uav-mqtthandler.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-mqtthandler - servicePort: 8080 - - host: cmii-uav-notice.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - 
serviceName: cmii-uav-notice - servicePort: 8080 - - host: cmii-uav-oauth.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-oauth - servicePort: 8080 - - host: cmii-uav-process.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-process - servicePort: 8080 - - host: cmii-uav-security-system.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-security-system - servicePort: 8080 - - host: cmii-uav-surveillance.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-surveillance - servicePort: 8080 - - host: cmii-uav-user.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-user - servicePort: 8080 - - host: cmii-uav-waypoint.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-waypoint - servicePort: 8080 - - host: cmii-uav-alarm.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-alarm - servicePort: 8080 - - host: cmii-uav-emergency.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-emergency - servicePort: 8080 - - host: cmii-uav-integration.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-integration - servicePort: 8080 - - host: cmii-suav-supervision.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-supervision - servicePort: 8080 - - host: cmii-uav-gis-server.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gis-server - servicePort: 8080 - - host: 
cmii-uav-grid-datasource.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-grid-datasource - servicePort: 8080 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: all-gateways-ingress - namespace: bjtg - labels: - type: api-gateway - octopus.control: all-ingress-config-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/configuration-snippet: | - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; -spec: - rules: - - host: fake-domain.bjtg.io - http: - paths: - - path: /oms/api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-gateway - servicePort: 8080 - - path: /open/api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-open-gateway - servicePort: 8080 - - path: /api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gateway - servicePort: 8080 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: frontend-applications-ingress - namespace: bjtg - labels: - type: frontend - octopus.control: all-ingress-config-wdd - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/configuration-snippet: | - rewrite ^(/green)$ $1/ redirect; - rewrite ^(/supervision)$ $1/ redirect; - rewrite ^(/inspection)$ $1/ redirect; - rewrite ^(/park)$ $1/ redirect; - rewrite ^(/pangu)$ $1/ redirect; - rewrite ^(/ai-brain)$ $1/ redirect; - rewrite ^(/base)$ $1/ redirect; - rewrite ^(/cms)$ $1/ redirect; - rewrite ^(/cmsportal)$ $1/ redirect; - 
rewrite ^(/detection)$ $1/ redirect; - rewrite ^(/emergency)$ $1/ redirect; - rewrite ^(/hyper)$ $1/ redirect; - rewrite ^(/logistics)$ $1/ redirect; - rewrite ^(/mws)$ $1/ redirect; - rewrite ^(/mws-admin)$ $1/ redirect; - rewrite ^(/oms)$ $1/ redirect; - rewrite ^(/open)$ $1/ redirect; - rewrite ^(/security)$ $1/ redirect; - rewrite ^(/share)$ $1/ redirect; - rewrite ^(/splice)$ $1/ redirect; - rewrite ^(/splice-visual)$ $1/ redirect; - rewrite ^(/traffic)$ $1/ redirect; - rewrite ^(/visualization)$ $1/ redirect; - rewrite ^(/communication)$ $1/ redirect; - rewrite ^(/infrastructure)$ $1/ redirect; - rewrite ^(/media)$ $1/ redirect; - rewrite ^(/seniclive)$ $1/ redirect; -spec: - rules: - - host: fake-domain.bjtg.io - http: - paths: - - path: /inspection/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /supervision/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-platform-supervision - servicePort: 9528 - - path: /supervisionh5/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-platform-supervisionh5 - servicePort: 9528 - - path: /green/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /park/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /pangu/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /emersupport/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /infrastructure/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms-portal - servicePort: 9528 - - path: /ai-brain/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: 
cmii-uav-platform-ai-brain - servicePort: 9528 - - path: /base/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-base - servicePort: 9528 - - path: /cms/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms - servicePort: 9528 - - path: /cmsportal/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms-portal - servicePort: 9528 - - path: /detection/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-detection - servicePort: 9528 - - path: /emergency/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-emergency-rescue - servicePort: 9528 - - path: /hyper/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-hyperspectral - servicePort: 9528 - - path: /logistics/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-logistics - servicePort: 9528 - - path: /mws/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-mws - servicePort: 9528 - - path: /mws-admin/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-mws-admin - servicePort: 9528 - - path: /oms/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-oms - servicePort: 9528 - - path: /open/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-open - servicePort: 9528 - - path: /security/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /share/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-share - servicePort: 9528 - - path: /splice/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-splice - servicePort: 9528 - - path: /splice-visual/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: 
cmii-uav-platform-splice-visual - servicePort: 9528 - - path: /traffic/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /visualization/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-visualization - servicePort: 9528 - - path: /communication/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /media/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-media - servicePort: 9528 - - path: /seniclive/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-seniclive - servicePort: 9528 - - path: /jiangsuwenlv/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-jiangsuwenlv - servicePort: 9528 - - path: /qinghaitourism/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-qinghaitourism - servicePort: 9528 - - path: /securityh5/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-securityh5 - servicePort: 9528 - - path: /fireRescue/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: backend-applications-ingress - namespace: bjtg - labels: - type: backend - octopus.control: all-ingress-config-wdd - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" -spec: - rules: - - host: cmii-admin-data.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-data - servicePort: 8080 - - host: cmii-admin-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-gateway - 
servicePort: 8080 - - host: cmii-admin-user.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-user - servicePort: 8080 - - host: cmii-open-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-open-gateway - servicePort: 8080 - - host: cmii-uav-airspace.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-airspace - servicePort: 8080 - - host: cmii-uav-brain.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-brain - servicePort: 8080 - - host: cmii-uav-clusters.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-clusters - servicePort: 8080 - - host: cmii-uav-cms.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-cms - servicePort: 8080 - - host: cmii-uav-data-post-process.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-data-post-process - servicePort: 8080 - - host: cmii-uav-developer.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-developer - servicePort: 8080 - - host: cmii-uav-device.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-device - servicePort: 8080 - - host: cmii-uav-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gateway - servicePort: 8080 - - host: cmii-uav-industrial-portfolio.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-industrial-portfolio - servicePort: 8080 - - host: cmii-uav-kpi-monitor.uavcloud-bjtg.io - http: - 
paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-kpi-monitor - servicePort: 8080 - - host: cmii-uav-live.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-live - servicePort: 8080 - - host: cmii-uav-cloud-live.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-cloud-live - servicePort: 8080 - - host: cmii-uav-logger.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-logger - servicePort: 8080 - - host: cmii-uav-material-warehouse.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-material-warehouse - servicePort: 8080 - - host: cmii-uav-mission.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-mission - servicePort: 8080 - - host: cmii-uav-monitor.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-monitor - servicePort: 8080 - - host: cmii-uav-mqtthandler.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-mqtthandler - servicePort: 8080 - - host: cmii-uav-notice.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-notice - servicePort: 8080 - - host: cmii-uav-oauth.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-oauth - servicePort: 8080 - - host: cmii-uav-process.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-process - servicePort: 8080 - - host: cmii-uav-security-system.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: 
cmii-uav-security-system - servicePort: 8080 - - host: cmii-uav-surveillance.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-surveillance - servicePort: 8080 - - host: cmii-uav-user.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-user - servicePort: 8080 - - host: cmii-uav-waypoint.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-waypoint - servicePort: 8080 - - host: cmii-uav-alarm.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-alarm - servicePort: 8080 - - host: cmii-uav-emergency.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-emergency - servicePort: 8080 - - host: cmii-uav-integration.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-integration - servicePort: 8080 - - host: cmii-suav-supervision.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-supervision - servicePort: 8080 - - host: cmii-uav-gis-server.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gis-server - servicePort: 8080 - - host: cmii-uav-grid-datasource.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-grid-datasource - servicePort: 8080 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: all-gateways-ingress - namespace: bjtg - labels: - type: api-gateway - octopus.control: all-ingress-config-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - 
nginx.ingress.kubernetes.io/enable-cors: "true" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/configuration-snippet: | - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; -spec: - rules: - - host: fake-domain.bjtg.io - http: - paths: - - path: /oms/api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-gateway - servicePort: 8080 - - path: /open/api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-open-gateway - servicePort: 8080 - - path: /api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gateway - servicePort: 8080 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: frontend-applications-ingress - namespace: bjtg - labels: - type: frontend - octopus.control: all-ingress-config-wdd - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/configuration-snippet: | - rewrite ^(/green)$ $1/ redirect; - rewrite ^(/supervision)$ $1/ redirect; - rewrite ^(/inspection)$ $1/ redirect; - rewrite ^(/park)$ $1/ redirect; - rewrite ^(/pangu)$ $1/ redirect; - rewrite ^(/ai-brain)$ $1/ redirect; - rewrite ^(/base)$ $1/ redirect; - rewrite ^(/cms)$ $1/ redirect; - rewrite ^(/cmsportal)$ $1/ redirect; - rewrite ^(/detection)$ $1/ redirect; - rewrite ^(/emergency)$ $1/ redirect; - rewrite ^(/hyper)$ $1/ redirect; - rewrite ^(/logistics)$ $1/ redirect; - rewrite ^(/mws)$ $1/ redirect; - rewrite ^(/mws-admin)$ $1/ redirect; - rewrite ^(/oms)$ $1/ redirect; - rewrite ^(/open)$ $1/ redirect; - rewrite ^(/security)$ $1/ redirect; - rewrite ^(/share)$ $1/ redirect; - rewrite ^(/splice)$ $1/ redirect; - rewrite ^(/splice-visual)$ $1/ redirect; - rewrite ^(/traffic)$ $1/ redirect; - rewrite ^(/visualization)$ $1/ 
redirect; - rewrite ^(/communication)$ $1/ redirect; - rewrite ^(/infrastructure)$ $1/ redirect; - rewrite ^(/media)$ $1/ redirect; - rewrite ^(/seniclive)$ $1/ redirect; -spec: - rules: - - host: fake-domain.bjtg.io - http: - paths: - - path: /inspection/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /supervision/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-platform-supervision - servicePort: 9528 - - path: /supervisionh5/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-platform-supervisionh5 - servicePort: 9528 - - path: /green/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /park/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /pangu/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /emersupport/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /infrastructure/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms-portal - servicePort: 9528 - - path: /ai-brain/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-ai-brain - servicePort: 9528 - - path: /base/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-base - servicePort: 9528 - - path: /cms/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms - servicePort: 9528 - - path: /cmsportal/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms-portal - servicePort: 9528 - - path: /detection/?(.*) - pathType: ImplementationSpecific - backend: - 
serviceName: cmii-uav-platform-detection - servicePort: 9528 - - path: /emergency/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-emergency-rescue - servicePort: 9528 - - path: /hyper/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-hyperspectral - servicePort: 9528 - - path: /logistics/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-logistics - servicePort: 9528 - - path: /mws/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-mws - servicePort: 9528 - - path: /mws-admin/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-mws-admin - servicePort: 9528 - - path: /oms/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-oms - servicePort: 9528 - - path: /open/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-open - servicePort: 9528 - - path: /security/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /share/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-share - servicePort: 9528 - - path: /splice/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-splice - servicePort: 9528 - - path: /splice-visual/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-splice-visual - servicePort: 9528 - - path: /traffic/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /visualization/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-visualization - servicePort: 9528 - - path: /communication/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /media/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: 
cmii-uav-platform-media - servicePort: 9528 - - path: /seniclive/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-seniclive - servicePort: 9528 - - path: /jiangsuwenlv/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-jiangsuwenlv - servicePort: 9528 - - path: /qinghaitourism/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-qinghaitourism - servicePort: 9528 - - path: /securityh5/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-securityh5 - servicePort: 9528 - - path: /fireRescue/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: backend-applications-ingress - namespace: bjtg - labels: - type: backend - octopus.control: all-ingress-config-wdd - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" -spec: - rules: - - host: cmii-admin-data.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-data - servicePort: 8080 - - host: cmii-admin-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-gateway - servicePort: 8080 - - host: cmii-admin-user.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-user - servicePort: 8080 - - host: cmii-open-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-open-gateway - servicePort: 8080 - - host: cmii-uav-airspace.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-airspace - servicePort: 8080 - - host: 
cmii-uav-brain.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-brain - servicePort: 8080 - - host: cmii-uav-clusters.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-clusters - servicePort: 8080 - - host: cmii-uav-cms.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-cms - servicePort: 8080 - - host: cmii-uav-data-post-process.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-data-post-process - servicePort: 8080 - - host: cmii-uav-developer.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-developer - servicePort: 8080 - - host: cmii-uav-device.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-device - servicePort: 8080 - - host: cmii-uav-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gateway - servicePort: 8080 - - host: cmii-uav-industrial-portfolio.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-industrial-portfolio - servicePort: 8080 - - host: cmii-uav-kpi-monitor.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-kpi-monitor - servicePort: 8080 - - host: cmii-uav-live.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-live - servicePort: 8080 - - host: cmii-uav-cloud-live.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-cloud-live - servicePort: 8080 - - host: cmii-uav-logger.uavcloud-bjtg.io - http: - paths: - - path: / - 
pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-logger - servicePort: 8080 - - host: cmii-uav-material-warehouse.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-material-warehouse - servicePort: 8080 - - host: cmii-uav-mission.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-mission - servicePort: 8080 - - host: cmii-uav-monitor.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-monitor - servicePort: 8080 - - host: cmii-uav-mqtthandler.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-mqtthandler - servicePort: 8080 - - host: cmii-uav-notice.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-notice - servicePort: 8080 - - host: cmii-uav-oauth.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-oauth - servicePort: 8080 - - host: cmii-uav-process.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-process - servicePort: 8080 - - host: cmii-uav-security-system.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-security-system - servicePort: 8080 - - host: cmii-uav-surveillance.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-surveillance - servicePort: 8080 - - host: cmii-uav-user.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-user - servicePort: 8080 - - host: cmii-uav-waypoint.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: 
cmii-uav-waypoint - servicePort: 8080 - - host: cmii-uav-alarm.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-alarm - servicePort: 8080 - - host: cmii-uav-emergency.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-emergency - servicePort: 8080 - - host: cmii-uav-integration.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-integration - servicePort: 8080 - - host: cmii-suav-supervision.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-supervision - servicePort: 8080 - - host: cmii-uav-gis-server.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gis-server - servicePort: 8080 - - host: cmii-uav-grid-datasource.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-grid-datasource - servicePort: 8080 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: all-gateways-ingress - namespace: bjtg - labels: - type: api-gateway - octopus.control: all-ingress-config-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/configuration-snippet: | - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; -spec: - rules: - - host: fake-domain.bjtg.io - http: - paths: - - path: /oms/api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-gateway - servicePort: 8080 - - path: /open/api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-open-gateway - servicePort: 8080 - - path: 
/api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gateway - servicePort: 8080 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: frontend-applications-ingress - namespace: bjtg - labels: - type: frontend - octopus.control: all-ingress-config-wdd - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/configuration-snippet: | - rewrite ^(/green)$ $1/ redirect; - rewrite ^(/supervision)$ $1/ redirect; - rewrite ^(/inspection)$ $1/ redirect; - rewrite ^(/park)$ $1/ redirect; - rewrite ^(/pangu)$ $1/ redirect; - rewrite ^(/ai-brain)$ $1/ redirect; - rewrite ^(/base)$ $1/ redirect; - rewrite ^(/cms)$ $1/ redirect; - rewrite ^(/cmsportal)$ $1/ redirect; - rewrite ^(/detection)$ $1/ redirect; - rewrite ^(/emergency)$ $1/ redirect; - rewrite ^(/hyper)$ $1/ redirect; - rewrite ^(/logistics)$ $1/ redirect; - rewrite ^(/mws)$ $1/ redirect; - rewrite ^(/mws-admin)$ $1/ redirect; - rewrite ^(/oms)$ $1/ redirect; - rewrite ^(/open)$ $1/ redirect; - rewrite ^(/security)$ $1/ redirect; - rewrite ^(/share)$ $1/ redirect; - rewrite ^(/splice)$ $1/ redirect; - rewrite ^(/splice-visual)$ $1/ redirect; - rewrite ^(/traffic)$ $1/ redirect; - rewrite ^(/visualization)$ $1/ redirect; - rewrite ^(/communication)$ $1/ redirect; - rewrite ^(/infrastructure)$ $1/ redirect; - rewrite ^(/media)$ $1/ redirect; - rewrite ^(/seniclive)$ $1/ redirect; -spec: - rules: - - host: fake-domain.bjtg.io - http: - paths: - - path: /inspection/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /supervision/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-platform-supervision - servicePort: 9528 - - path: /supervisionh5/?(.*) - pathType: 
ImplementationSpecific - backend: - serviceName: cmii-suav-platform-supervisionh5 - servicePort: 9528 - - path: /green/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /park/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /pangu/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /emersupport/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /infrastructure/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms-portal - servicePort: 9528 - - path: /ai-brain/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-ai-brain - servicePort: 9528 - - path: /base/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-base - servicePort: 9528 - - path: /cms/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms - servicePort: 9528 - - path: /cmsportal/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms-portal - servicePort: 9528 - - path: /detection/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-detection - servicePort: 9528 - - path: /emergency/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-emergency-rescue - servicePort: 9528 - - path: /hyper/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-hyperspectral - servicePort: 9528 - - path: /logistics/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-logistics - servicePort: 9528 - - path: /mws/?(.*) - pathType: ImplementationSpecific - backend: - 
serviceName: cmii-uav-platform-mws - servicePort: 9528 - - path: /mws-admin/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-mws-admin - servicePort: 9528 - - path: /oms/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-oms - servicePort: 9528 - - path: /open/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-open - servicePort: 9528 - - path: /security/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /share/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-share - servicePort: 9528 - - path: /splice/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-splice - servicePort: 9528 - - path: /splice-visual/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-splice-visual - servicePort: 9528 - - path: /traffic/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /visualization/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-visualization - servicePort: 9528 - - path: /communication/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /media/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-media - servicePort: 9528 - - path: /seniclive/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-seniclive - servicePort: 9528 - - path: /jiangsuwenlv/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-jiangsuwenlv - servicePort: 9528 - - path: /qinghaitourism/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-qinghaitourism - servicePort: 9528 - - path: /securityh5/?(.*) - pathType: ImplementationSpecific - backend: - 
serviceName: cmii-uav-platform-securityh5 - servicePort: 9528 - - path: /fireRescue/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: backend-applications-ingress - namespace: bjtg - labels: - type: backend - octopus.control: all-ingress-config-wdd - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" -spec: - rules: - - host: cmii-admin-data.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-data - servicePort: 8080 - - host: cmii-admin-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-gateway - servicePort: 8080 - - host: cmii-admin-user.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-user - servicePort: 8080 - - host: cmii-open-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-open-gateway - servicePort: 8080 - - host: cmii-uav-airspace.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-airspace - servicePort: 8080 - - host: cmii-uav-brain.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-brain - servicePort: 8080 - - host: cmii-uav-clusters.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-clusters - servicePort: 8080 - - host: cmii-uav-cms.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-cms - servicePort: 8080 - - host: 
cmii-uav-data-post-process.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-data-post-process - servicePort: 8080 - - host: cmii-uav-developer.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-developer - servicePort: 8080 - - host: cmii-uav-device.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-device - servicePort: 8080 - - host: cmii-uav-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gateway - servicePort: 8080 - - host: cmii-uav-industrial-portfolio.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-industrial-portfolio - servicePort: 8080 - - host: cmii-uav-kpi-monitor.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-kpi-monitor - servicePort: 8080 - - host: cmii-uav-live.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-live - servicePort: 8080 - - host: cmii-uav-cloud-live.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-cloud-live - servicePort: 8080 - - host: cmii-uav-logger.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-logger - servicePort: 8080 - - host: cmii-uav-material-warehouse.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-material-warehouse - servicePort: 8080 - - host: cmii-uav-mission.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-mission - servicePort: 8080 - - host: cmii-uav-monitor.uavcloud-bjtg.io - http: 
- paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-monitor - servicePort: 8080 - - host: cmii-uav-mqtthandler.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-mqtthandler - servicePort: 8080 - - host: cmii-uav-notice.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-notice - servicePort: 8080 - - host: cmii-uav-oauth.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-oauth - servicePort: 8080 - - host: cmii-uav-process.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-process - servicePort: 8080 - - host: cmii-uav-security-system.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-security-system - servicePort: 8080 - - host: cmii-uav-surveillance.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-surveillance - servicePort: 8080 - - host: cmii-uav-user.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-user - servicePort: 8080 - - host: cmii-uav-waypoint.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-waypoint - servicePort: 8080 - - host: cmii-uav-alarm.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-alarm - servicePort: 8080 - - host: cmii-uav-emergency.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-emergency - servicePort: 8080 - - host: cmii-uav-integration.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: 
cmii-uav-integration - servicePort: 8080 - - host: cmii-suav-supervision.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-supervision - servicePort: 8080 - - host: cmii-uav-gis-server.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gis-server - servicePort: 8080 - - host: cmii-uav-grid-datasource.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-grid-datasource - servicePort: 8080 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: all-gateways-ingress - namespace: bjtg - labels: - type: api-gateway - octopus.control: all-ingress-config-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/configuration-snippet: | - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; -spec: - rules: - - host: fake-domain.bjtg.io - http: - paths: - - path: /oms/api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-gateway - servicePort: 8080 - - path: /open/api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-open-gateway - servicePort: 8080 - - path: /api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gateway - servicePort: 8080 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: frontend-applications-ingress - namespace: bjtg - labels: - type: frontend - octopus.control: all-ingress-config-wdd - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" - 
nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/configuration-snippet: | - rewrite ^(/green)$ $1/ redirect; - rewrite ^(/supervision)$ $1/ redirect; - rewrite ^(/inspection)$ $1/ redirect; - rewrite ^(/park)$ $1/ redirect; - rewrite ^(/pangu)$ $1/ redirect; - rewrite ^(/ai-brain)$ $1/ redirect; - rewrite ^(/base)$ $1/ redirect; - rewrite ^(/cms)$ $1/ redirect; - rewrite ^(/cmsportal)$ $1/ redirect; - rewrite ^(/detection)$ $1/ redirect; - rewrite ^(/emergency)$ $1/ redirect; - rewrite ^(/hyper)$ $1/ redirect; - rewrite ^(/logistics)$ $1/ redirect; - rewrite ^(/mws)$ $1/ redirect; - rewrite ^(/mws-admin)$ $1/ redirect; - rewrite ^(/oms)$ $1/ redirect; - rewrite ^(/open)$ $1/ redirect; - rewrite ^(/security)$ $1/ redirect; - rewrite ^(/share)$ $1/ redirect; - rewrite ^(/splice)$ $1/ redirect; - rewrite ^(/splice-visual)$ $1/ redirect; - rewrite ^(/traffic)$ $1/ redirect; - rewrite ^(/visualization)$ $1/ redirect; - rewrite ^(/communication)$ $1/ redirect; - rewrite ^(/infrastructure)$ $1/ redirect; - rewrite ^(/media)$ $1/ redirect; - rewrite ^(/seniclive)$ $1/ redirect; -spec: - rules: - - host: fake-domain.bjtg.io - http: - paths: - - path: /inspection/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /supervision/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-platform-supervision - servicePort: 9528 - - path: /supervisionh5/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-platform-supervisionh5 - servicePort: 9528 - - path: /green/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /park/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /pangu/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /emersupport/?(.*) - pathType: 
ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /infrastructure/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms-portal - servicePort: 9528 - - path: /ai-brain/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-ai-brain - servicePort: 9528 - - path: /base/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-base - servicePort: 9528 - - path: /cms/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms - servicePort: 9528 - - path: /cmsportal/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-cms-portal - servicePort: 9528 - - path: /detection/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-detection - servicePort: 9528 - - path: /emergency/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-emergency-rescue - servicePort: 9528 - - path: /hyper/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-hyperspectral - servicePort: 9528 - - path: /logistics/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-logistics - servicePort: 9528 - - path: /mws/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-mws - servicePort: 9528 - - path: /mws-admin/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-mws-admin - servicePort: 9528 - - path: /oms/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-oms - servicePort: 9528 - - path: /open/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-open - servicePort: 9528 - - path: /security/?(.*) - pathType: ImplementationSpecific - backend: 
- serviceName: cmii-uav-platform - servicePort: 9528 - - path: /share/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-share - servicePort: 9528 - - path: /splice/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-splice - servicePort: 9528 - - path: /splice-visual/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-splice-visual - servicePort: 9528 - - path: /traffic/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /visualization/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-visualization - servicePort: 9528 - - path: /communication/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 - - path: /media/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-media - servicePort: 9528 - - path: /seniclive/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-seniclive - servicePort: 9528 - - path: /jiangsuwenlv/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-jiangsuwenlv - servicePort: 9528 - - path: /qinghaitourism/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-qinghaitourism - servicePort: 9528 - - path: /securityh5/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform-securityh5 - servicePort: 9528 - - path: /fireRescue/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-platform - servicePort: 9528 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: backend-applications-ingress - namespace: bjtg - labels: - type: backend - octopus.control: all-ingress-config-wdd - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" 
- nginx.ingress.kubernetes.io/enable-cors: "true" -spec: - rules: - - host: cmii-admin-data.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-data - servicePort: 8080 - - host: cmii-admin-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-gateway - servicePort: 8080 - - host: cmii-admin-user.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-user - servicePort: 8080 - - host: cmii-open-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-open-gateway - servicePort: 8080 - - host: cmii-uav-airspace.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-airspace - servicePort: 8080 - - host: cmii-uav-brain.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-brain - servicePort: 8080 - - host: cmii-uav-clusters.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-clusters - servicePort: 8080 - - host: cmii-uav-cms.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-cms - servicePort: 8080 - - host: cmii-uav-data-post-process.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-data-post-process - servicePort: 8080 - - host: cmii-uav-developer.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-developer - servicePort: 8080 - - host: cmii-uav-device.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-device - servicePort: 8080 - - host: 
cmii-uav-gateway.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gateway - servicePort: 8080 - - host: cmii-uav-industrial-portfolio.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-industrial-portfolio - servicePort: 8080 - - host: cmii-uav-kpi-monitor.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-kpi-monitor - servicePort: 8080 - - host: cmii-uav-live.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-live - servicePort: 8080 - - host: cmii-uav-cloud-live.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-cloud-live - servicePort: 8080 - - host: cmii-uav-logger.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-logger - servicePort: 8080 - - host: cmii-uav-material-warehouse.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-material-warehouse - servicePort: 8080 - - host: cmii-uav-mission.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-mission - servicePort: 8080 - - host: cmii-uav-monitor.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-monitor - servicePort: 8080 - - host: cmii-uav-mqtthandler.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-mqtthandler - servicePort: 8080 - - host: cmii-uav-notice.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-notice - servicePort: 8080 - - host: cmii-uav-oauth.uavcloud-bjtg.io - http: - paths: - - path: 
/ - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-oauth - servicePort: 8080 - - host: cmii-uav-process.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-process - servicePort: 8080 - - host: cmii-uav-security-system.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-security-system - servicePort: 8080 - - host: cmii-uav-surveillance.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-surveillance - servicePort: 8080 - - host: cmii-uav-user.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-user - servicePort: 8080 - - host: cmii-uav-waypoint.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-waypoint - servicePort: 8080 - - host: cmii-uav-alarm.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-alarm - servicePort: 8080 - - host: cmii-uav-emergency.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-emergency - servicePort: 8080 - - host: cmii-uav-integration.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-integration - servicePort: 8080 - - host: cmii-suav-supervision.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-suav-supervision - servicePort: 8080 - - host: cmii-uav-gis-server.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gis-server - servicePort: 8080 - - host: cmii-uav-grid-datasource.uavcloud-bjtg.io - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - serviceName: 
cmii-uav-grid-datasource - servicePort: 8080 ---- -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: all-gateways-ingress - namespace: bjtg - labels: - type: api-gateway - octopus.control: all-ingress-config-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/enable-cors: "true" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/configuration-snippet: | - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; -spec: - rules: - - host: fake-domain.bjtg.io - http: - paths: - - path: /oms/api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-admin-gateway - servicePort: 8080 - - path: /open/api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-open-gateway - servicePort: 8080 - - path: /api/?(.*) - pathType: ImplementationSpecific - backend: - serviceName: cmii-uav-gateway - servicePort: 8080 diff --git a/agent-operator/deploy/z_bjtg/k8s-mongo.yaml b/agent-operator/deploy/z_bjtg/k8s-mongo.yaml deleted file mode 100755 index 70fd990..0000000 --- a/agent-operator/deploy/z_bjtg/k8s-mongo.yaml +++ /dev/null @@ -1,450 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.app: helm-mongo - cmii.type: middleware - ports: - - port: 27017 - name: server-27017 - targetPort: 27017 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - serviceName: helm-mongo - replicas: 1 - selector: 
- matchLabels: - cmii.app: helm-mongo - cmii.type: middleware - template: - metadata: - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - affinity: { } - containers: - - name: helm-mongo - image: 10.250.0.110:8033/cmii/mongo:5.0 - resources: { } - ports: - - containerPort: 27017 - name: mongo27017 - protocol: TCP - env: - - name: MONGO_INITDB_ROOT_USERNAME - value: cmlc - - name: MONGO_INITDB_ROOT_PASSWORD - value: REdPza8#oVlt - volumeMounts: - - name: mongo-data - mountPath: /data/db - readOnly: false - subPath: default/helm-mongo/data/db - volumes: - - name: mongo-data - persistentVolumeClaim: - claimName: helm-mongo ---- ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.app: helm-mongo - cmii.type: middleware - ports: - - port: 27017 - name: server-27017 - targetPort: 27017 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - serviceName: helm-mongo - replicas: 1 - selector: - matchLabels: - cmii.app: helm-mongo - cmii.type: middleware - template: - metadata: - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - affinity: { } - containers: - - name: helm-mongo - image: 10.250.0.110:8033/cmii/mongo:5.0 - resources: { } - ports: - - 
containerPort: 27017 - name: mongo27017 - protocol: TCP - env: - - name: MONGO_INITDB_ROOT_USERNAME - value: cmlc - - name: MONGO_INITDB_ROOT_PASSWORD - value: REdPza8#oVlt - volumeMounts: - - name: mongo-data - mountPath: /data/db - readOnly: false - subPath: default/helm-mongo/data/db - volumes: - - name: mongo-data - persistentVolumeClaim: - claimName: helm-mongo ---- ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.app: helm-mongo - cmii.type: middleware - ports: - - port: 27017 - name: server-27017 - targetPort: 27017 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - serviceName: helm-mongo - replicas: 1 - selector: - matchLabels: - cmii.app: helm-mongo - cmii.type: middleware - template: - metadata: - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - affinity: { } - containers: - - name: helm-mongo - image: 10.250.0.110:8033/cmii/mongo:5.0 - resources: { } - ports: - - containerPort: 27017 - name: mongo27017 - protocol: TCP - env: - - name: MONGO_INITDB_ROOT_USERNAME - value: cmlc - - name: MONGO_INITDB_ROOT_PASSWORD - value: REdPza8#oVlt - volumeMounts: - - name: mongo-data - mountPath: /data/db - readOnly: false - subPath: default/helm-mongo/data/db - volumes: - - name: mongo-data - persistentVolumeClaim: - claimName: helm-mongo ---- ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mongo - namespace: 
bjtg - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.app: helm-mongo - cmii.type: middleware - ports: - - port: 27017 - name: server-27017 - targetPort: 27017 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - serviceName: helm-mongo - replicas: 1 - selector: - matchLabels: - cmii.app: helm-mongo - cmii.type: middleware - template: - metadata: - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - affinity: { } - containers: - - name: helm-mongo - image: 10.250.0.110:8033/cmii/mongo:5.0 - resources: { } - ports: - - containerPort: 27017 - name: mongo27017 - protocol: TCP - env: - - name: MONGO_INITDB_ROOT_USERNAME - value: cmlc - - name: MONGO_INITDB_ROOT_PASSWORD - value: REdPza8#oVlt - volumeMounts: - - name: mongo-data - mountPath: /data/db - readOnly: false - subPath: default/helm-mongo/data/db - volumes: - - name: mongo-data - persistentVolumeClaim: - claimName: helm-mongo ---- ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.app: helm-mongo - cmii.type: middleware - ports: - - port: 27017 - name: server-27017 - targetPort: 27017 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.app: helm-mongo - 
cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - serviceName: helm-mongo - replicas: 1 - selector: - matchLabels: - cmii.app: helm-mongo - cmii.type: middleware - template: - metadata: - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - affinity: { } - containers: - - name: helm-mongo - image: 10.250.0.110:8033/cmii/mongo:5.0 - resources: { } - ports: - - containerPort: 27017 - name: mongo27017 - protocol: TCP - env: - - name: MONGO_INITDB_ROOT_USERNAME - value: cmlc - - name: MONGO_INITDB_ROOT_PASSWORD - value: REdPza8#oVlt - volumeMounts: - - name: mongo-data - mountPath: /data/db - readOnly: false - subPath: default/helm-mongo/data/db - volumes: - - name: mongo-data - persistentVolumeClaim: - claimName: helm-mongo ---- ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - type: ClusterIP - selector: - cmii.app: helm-mongo - cmii.type: middleware - ports: - - port: 27017 - name: server-27017 - targetPort: 27017 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 -spec: - serviceName: helm-mongo - replicas: 1 - selector: - matchLabels: - cmii.app: helm-mongo - cmii.type: middleware - template: - metadata: - labels: - cmii.app: helm-mongo - cmii.type: middleware - helm.sh/chart: mongo-1.1.0 - app.kubernetes.io/managed-by: octopus-control - app.kubernetes.io/version: 5.5.0 - 
annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - affinity: { } - containers: - - name: helm-mongo - image: 10.250.0.110:8033/cmii/mongo:5.0 - resources: { } - ports: - - containerPort: 27017 - name: mongo27017 - protocol: TCP - env: - - name: MONGO_INITDB_ROOT_USERNAME - value: cmlc - - name: MONGO_INITDB_ROOT_PASSWORD - value: REdPza8#oVlt - volumeMounts: - - name: mongo-data - mountPath: /data/db - readOnly: false - subPath: default/helm-mongo/data/db - volumes: - - name: mongo-data - persistentVolumeClaim: - claimName: helm-mongo ---- diff --git a/agent-operator/deploy/z_bjtg/k8s-mysql.yaml b/agent-operator/deploy/z_bjtg/k8s-mysql.yaml deleted file mode 100755 index 8ed31d1..0000000 --- a/agent-operator/deploy/z_bjtg/k8s-mysql.yaml +++ /dev/null @@ -1,2526 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - annotations: -secrets: - - name: helm-mysql ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -type: Opaque -data: - mysql-root-password: "UXpmWFFoZDNiUQ==" - mysql-password: "S0F0cm5PckFKNw==" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: primary -data: - my.cnf: |- - - [mysqld] - port=3306 - basedir=/opt/bitnami/mysql - datadir=/bitnami/mysql/data - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - socket=/opt/bitnami/mysql/tmp/mysql.sock - log-error=/bitnami/mysql/data/error.log - general_log_file = /bitnami/mysql/data/general.log - 
slow_query_log_file = /bitnami/mysql/data/slow.log - innodb_data_file_path = ibdata1:512M:autoextend - innodb_buffer_pool_size = 512M - innodb_buffer_pool_instances = 2 - innodb_log_file_size = 512M - innodb_log_files_in_group = 4 - innodb_log_files_in_group = 4 - log-bin = /bitnami/mysql/data/mysql-bin - max_binlog_size=1G - transaction_isolation = REPEATABLE-READ - default_storage_engine = innodb - character-set-server = utf8mb4 - collation-server=utf8mb4_bin - binlog_format = ROW - binlog_rows_query_log_events=on - binlog_cache_size=4M - binlog_expire_logs_seconds = 1296000 - max_binlog_cache_size=2G - gtid_mode = on - enforce_gtid_consistency = 1 - sync_binlog = 1 - innodb_flush_log_at_trx_commit = 1 - innodb_flush_method = O_DIRECT - log_slave_updates=1 - relay_log_recovery = 1 - relay-log-purge = 1 - default_time_zone = '+08:00' - lower_case_table_names=1 - log_bin_trust_function_creators=1 - group_concat_max_len=67108864 - innodb_io_capacity = 4000 - innodb_io_capacity_max = 8000 - innodb_flush_sync = 0 - innodb_flush_neighbors = 0 - innodb_write_io_threads = 8 - innodb_read_io_threads = 8 - innodb_purge_threads = 4 - innodb_page_cleaners = 4 - innodb_open_files = 65535 - innodb_max_dirty_pages_pct = 50 - innodb_lru_scan_depth = 4000 - innodb_checksum_algorithm = crc32 - innodb_lock_wait_timeout = 10 - innodb_rollback_on_timeout = 1 - innodb_print_all_deadlocks = 1 - innodb_file_per_table = 1 - innodb_online_alter_log_max_size = 4G - innodb_stats_on_metadata = 0 - innodb_thread_concurrency = 0 - innodb_sync_spin_loops = 100 - innodb_spin_wait_delay = 30 - lock_wait_timeout = 3600 - slow_query_log = 1 - long_query_time = 10 - log_queries_not_using_indexes =1 - log_throttle_queries_not_using_indexes = 60 - min_examined_row_limit = 100 - log_slow_admin_statements = 1 - log_slow_slave_statements = 1 - default_authentication_plugin=mysql_native_password - skip-name-resolve=1 - explicit_defaults_for_timestamp=1 - plugin_dir=/opt/bitnami/mysql/plugin - 
max_allowed_packet=128M - max_connections = 2000 - max_connect_errors = 1000000 - table_definition_cache=2000 - table_open_cache_instances=64 - tablespace_definition_cache=1024 - thread_cache_size=256 - interactive_timeout = 600 - wait_timeout = 600 - tmpdir=/opt/bitnami/mysql/tmp - max_allowed_packet=32M - bind-address=0.0.0.0 - performance_schema = 1 - performance_schema_instrument = '%memory%=on' - performance_schema_instrument = '%lock%=on' - innodb_monitor_enable=ALL - - [mysql] - no-auto-rehash - - [mysqldump] - quick - max_allowed_packet = 32M - - [client] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - default-character-set=UTF8 - plugin_dir=/opt/bitnami/mysql/plugin - - [manager] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-mysql-init-scripts - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: primary -data: - create_users_grants_core.sql: |- - create - user zyly@'%' identified by 'Cmii@451315'; - grant select on *.* to zyly@'%'; - create - user zyly_qc@'%' identified by 'Uh)E_owCyb16'; - grant all - on *.* to zyly_qc@'%'; - create - user k8s_admin@'%' identified by 'fP#UaH6qQ3)8'; - grant all - on *.* to k8s_admin@'%'; - create - user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH'; - grant all - on *.* to audit_dba@'%'; - create - user db_backup@'%' identified by 'RU5Pu(4FGdT9'; - GRANT - SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT - on *.* to db_backup@'%'; - create - user monitor@'%' identified by 'PL3#nGtrWbf-'; - grant REPLICATION - CLIENT on *.* to monitor@'%'; - flush - privileges; ---- -kind: Service -apiVersion: v1 -metadata: - name: cmii-mysql - namespace: bjtg - labels: - app.kubernetes.io/component: primary - app.kubernetes.io/managed-by: octopus - 
app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.app: mysql - cmii.type: middleware - octopus.control: mysql-db-wdd -spec: - ports: - - name: mysql - protocol: TCP - port: 13306 - targetPort: mysql - selector: - app.kubernetes.io/component: primary - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.app: mysql - cmii.type: middleware - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mysql-headless - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - annotations: -spec: - type: ClusterIP - clusterIP: None - publishNotReadyAddresses: true - ports: - - name: mysql - port: 3306 - targetPort: mysql - selector: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - annotations: -spec: - type: NodePort - ports: - - name: mysql - port: 3306 - protocol: TCP - targetPort: mysql - nodePort: 33306 - selector: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary -spec: - replicas: 1 - 
selector: - matchLabels: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - serviceName: helm-mysql - updateStrategy: - type: RollingUpdate - template: - metadata: - annotations: - checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - spec: - serviceAccountName: helm-mysql - affinity: { } - nodeSelector: - mysql-deploy: "true" - securityContext: - fsGroup: 1001 - initContainers: - - name: change-volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:11-debian-11-r136 - imagePullPolicy: "Always" - command: - - /bin/bash - - -ec - - | - chown -R 1001:1001 /bitnami/mysql - securityContext: - runAsUser: 0 - volumeMounts: - - name: mysql-data - mountPath: /bitnami/mysql - containers: - - name: mysql - image: 10.250.0.110:8033/cmii/mysql:8.1.0-debian-11-r42 - imagePullPolicy: "IfNotPresent" - securityContext: - runAsUser: 1001 - env: - - name: BITNAMI_DEBUG - value: "true" - - name: MYSQL_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: helm-mysql - key: mysql-root-password - - name: MYSQL_DATABASE - value: "cmii" - ports: - - name: mysql - containerPort: 3306 - livenessProbe: - failureThreshold: 5 - initialDelaySeconds: 120 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 3 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - readinessProbe: - failureThreshold: 5 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 3 - exec: - command: - - /bin/bash - - -ec - - | 
- password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - startupProbe: - failureThreshold: 60 - initialDelaySeconds: 120 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - resources: - limits: { } - requests: { } - volumeMounts: - - name: mysql-data - mountPath: /bitnami/mysql - - name: custom-init-scripts - mountPath: /docker-entrypoint-initdb.d - - name: config - mountPath: /opt/bitnami/mysql/conf/my.cnf - subPath: my.cnf - volumes: - - name: config - configMap: - name: helm-mysql - - name: custom-init-scripts - configMap: - name: helm-mysql-init-scripts - - name: mysql-data - hostPath: - path: /var/lib/docker/mysql-pv ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - annotations: -secrets: - - name: helm-mysql ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -type: Opaque -data: - mysql-root-password: "UXpmWFFoZDNiUQ==" - mysql-password: "S0F0cm5PckFKNw==" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: primary -data: - my.cnf: |- - - [mysqld] - port=3306 - basedir=/opt/bitnami/mysql - 
datadir=/bitnami/mysql/data - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - socket=/opt/bitnami/mysql/tmp/mysql.sock - log-error=/bitnami/mysql/data/error.log - general_log_file = /bitnami/mysql/data/general.log - slow_query_log_file = /bitnami/mysql/data/slow.log - innodb_data_file_path = ibdata1:512M:autoextend - innodb_buffer_pool_size = 512M - innodb_buffer_pool_instances = 2 - innodb_log_file_size = 512M - innodb_log_files_in_group = 4 - innodb_log_files_in_group = 4 - log-bin = /bitnami/mysql/data/mysql-bin - max_binlog_size=1G - transaction_isolation = REPEATABLE-READ - default_storage_engine = innodb - character-set-server = utf8mb4 - collation-server=utf8mb4_bin - binlog_format = ROW - binlog_rows_query_log_events=on - binlog_cache_size=4M - binlog_expire_logs_seconds = 1296000 - max_binlog_cache_size=2G - gtid_mode = on - enforce_gtid_consistency = 1 - sync_binlog = 1 - innodb_flush_log_at_trx_commit = 1 - innodb_flush_method = O_DIRECT - log_slave_updates=1 - relay_log_recovery = 1 - relay-log-purge = 1 - default_time_zone = '+08:00' - lower_case_table_names=1 - log_bin_trust_function_creators=1 - group_concat_max_len=67108864 - innodb_io_capacity = 4000 - innodb_io_capacity_max = 8000 - innodb_flush_sync = 0 - innodb_flush_neighbors = 0 - innodb_write_io_threads = 8 - innodb_read_io_threads = 8 - innodb_purge_threads = 4 - innodb_page_cleaners = 4 - innodb_open_files = 65535 - innodb_max_dirty_pages_pct = 50 - innodb_lru_scan_depth = 4000 - innodb_checksum_algorithm = crc32 - innodb_lock_wait_timeout = 10 - innodb_rollback_on_timeout = 1 - innodb_print_all_deadlocks = 1 - innodb_file_per_table = 1 - innodb_online_alter_log_max_size = 4G - innodb_stats_on_metadata = 0 - innodb_thread_concurrency = 0 - innodb_sync_spin_loops = 100 - innodb_spin_wait_delay = 30 - lock_wait_timeout = 3600 - slow_query_log = 1 - long_query_time = 10 - log_queries_not_using_indexes =1 - log_throttle_queries_not_using_indexes = 60 - min_examined_row_limit = 100 - 
log_slow_admin_statements = 1 - log_slow_slave_statements = 1 - default_authentication_plugin=mysql_native_password - skip-name-resolve=1 - explicit_defaults_for_timestamp=1 - plugin_dir=/opt/bitnami/mysql/plugin - max_allowed_packet=128M - max_connections = 2000 - max_connect_errors = 1000000 - table_definition_cache=2000 - table_open_cache_instances=64 - tablespace_definition_cache=1024 - thread_cache_size=256 - interactive_timeout = 600 - wait_timeout = 600 - tmpdir=/opt/bitnami/mysql/tmp - max_allowed_packet=32M - bind-address=0.0.0.0 - performance_schema = 1 - performance_schema_instrument = '%memory%=on' - performance_schema_instrument = '%lock%=on' - innodb_monitor_enable=ALL - - [mysql] - no-auto-rehash - - [mysqldump] - quick - max_allowed_packet = 32M - - [client] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - default-character-set=UTF8 - plugin_dir=/opt/bitnami/mysql/plugin - - [manager] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-mysql-init-scripts - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: primary -data: - create_users_grants_core.sql: |- - create - user zyly@'%' identified by 'Cmii@451315'; - grant select on *.* to zyly@'%'; - create - user zyly_qc@'%' identified by 'Uh)E_owCyb16'; - grant all - on *.* to zyly_qc@'%'; - create - user k8s_admin@'%' identified by 'fP#UaH6qQ3)8'; - grant all - on *.* to k8s_admin@'%'; - create - user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH'; - grant all - on *.* to audit_dba@'%'; - create - user db_backup@'%' identified by 'RU5Pu(4FGdT9'; - GRANT - SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT - on *.* to db_backup@'%'; - create - user monitor@'%' identified by 'PL3#nGtrWbf-'; - grant REPLICATION - CLIENT on *.* 
to monitor@'%'; - flush - privileges; ---- -kind: Service -apiVersion: v1 -metadata: - name: cmii-mysql - namespace: bjtg - labels: - app.kubernetes.io/component: primary - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.app: mysql - cmii.type: middleware - octopus.control: mysql-db-wdd -spec: - ports: - - name: mysql - protocol: TCP - port: 13306 - targetPort: mysql - selector: - app.kubernetes.io/component: primary - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.app: mysql - cmii.type: middleware - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mysql-headless - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - annotations: -spec: - type: ClusterIP - clusterIP: None - publishNotReadyAddresses: true - ports: - - name: mysql - port: 3306 - targetPort: mysql - selector: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - annotations: -spec: - type: NodePort - ports: - - name: mysql - port: 3306 - protocol: TCP - targetPort: mysql - nodePort: 33306 - selector: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db 
- octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - serviceName: helm-mysql - updateStrategy: - type: RollingUpdate - template: - metadata: - annotations: - checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - spec: - serviceAccountName: helm-mysql - affinity: { } - nodeSelector: - mysql-deploy: "true" - securityContext: - fsGroup: 1001 - initContainers: - - name: change-volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:11-debian-11-r136 - imagePullPolicy: "Always" - command: - - /bin/bash - - -ec - - | - chown -R 1001:1001 /bitnami/mysql - securityContext: - runAsUser: 0 - volumeMounts: - - name: mysql-data - mountPath: /bitnami/mysql - containers: - - name: mysql - image: 10.250.0.110:8033/cmii/mysql:8.1.0-debian-11-r42 - imagePullPolicy: "IfNotPresent" - securityContext: - runAsUser: 1001 - env: - - name: BITNAMI_DEBUG - value: "true" - - name: MYSQL_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: helm-mysql - key: mysql-root-password - - name: MYSQL_DATABASE - value: "cmii" - ports: - - name: mysql - containerPort: 3306 - livenessProbe: - failureThreshold: 5 - initialDelaySeconds: 120 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 3 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin 
status -uroot -p"${password_aux}" - readinessProbe: - failureThreshold: 5 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 3 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - startupProbe: - failureThreshold: 60 - initialDelaySeconds: 120 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - resources: - limits: { } - requests: { } - volumeMounts: - - name: mysql-data - mountPath: /bitnami/mysql - - name: custom-init-scripts - mountPath: /docker-entrypoint-initdb.d - - name: config - mountPath: /opt/bitnami/mysql/conf/my.cnf - subPath: my.cnf - volumes: - - name: config - configMap: - name: helm-mysql - - name: custom-init-scripts - configMap: - name: helm-mysql-init-scripts - - name: mysql-data - hostPath: - path: /var/lib/docker/mysql-pv ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - annotations: -secrets: - - name: helm-mysql ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -type: Opaque -data: - mysql-root-password: "UXpmWFFoZDNiUQ==" - mysql-password: "S0F0cm5PckFKNw==" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - 
octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: primary -data: - my.cnf: |- - - [mysqld] - port=3306 - basedir=/opt/bitnami/mysql - datadir=/bitnami/mysql/data - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - socket=/opt/bitnami/mysql/tmp/mysql.sock - log-error=/bitnami/mysql/data/error.log - general_log_file = /bitnami/mysql/data/general.log - slow_query_log_file = /bitnami/mysql/data/slow.log - innodb_data_file_path = ibdata1:512M:autoextend - innodb_buffer_pool_size = 512M - innodb_buffer_pool_instances = 2 - innodb_log_file_size = 512M - innodb_log_files_in_group = 4 - innodb_log_files_in_group = 4 - log-bin = /bitnami/mysql/data/mysql-bin - max_binlog_size=1G - transaction_isolation = REPEATABLE-READ - default_storage_engine = innodb - character-set-server = utf8mb4 - collation-server=utf8mb4_bin - binlog_format = ROW - binlog_rows_query_log_events=on - binlog_cache_size=4M - binlog_expire_logs_seconds = 1296000 - max_binlog_cache_size=2G - gtid_mode = on - enforce_gtid_consistency = 1 - sync_binlog = 1 - innodb_flush_log_at_trx_commit = 1 - innodb_flush_method = O_DIRECT - log_slave_updates=1 - relay_log_recovery = 1 - relay-log-purge = 1 - default_time_zone = '+08:00' - lower_case_table_names=1 - log_bin_trust_function_creators=1 - group_concat_max_len=67108864 - innodb_io_capacity = 4000 - innodb_io_capacity_max = 8000 - innodb_flush_sync = 0 - innodb_flush_neighbors = 0 - innodb_write_io_threads = 8 - innodb_read_io_threads = 8 - innodb_purge_threads = 4 - innodb_page_cleaners = 4 - innodb_open_files = 65535 - innodb_max_dirty_pages_pct = 50 - innodb_lru_scan_depth = 4000 - innodb_checksum_algorithm = crc32 - innodb_lock_wait_timeout = 10 - innodb_rollback_on_timeout = 1 - innodb_print_all_deadlocks = 1 - innodb_file_per_table = 1 - innodb_online_alter_log_max_size = 4G - innodb_stats_on_metadata = 0 - innodb_thread_concurrency = 0 - innodb_sync_spin_loops = 100 - 
innodb_spin_wait_delay = 30 - lock_wait_timeout = 3600 - slow_query_log = 1 - long_query_time = 10 - log_queries_not_using_indexes =1 - log_throttle_queries_not_using_indexes = 60 - min_examined_row_limit = 100 - log_slow_admin_statements = 1 - log_slow_slave_statements = 1 - default_authentication_plugin=mysql_native_password - skip-name-resolve=1 - explicit_defaults_for_timestamp=1 - plugin_dir=/opt/bitnami/mysql/plugin - max_allowed_packet=128M - max_connections = 2000 - max_connect_errors = 1000000 - table_definition_cache=2000 - table_open_cache_instances=64 - tablespace_definition_cache=1024 - thread_cache_size=256 - interactive_timeout = 600 - wait_timeout = 600 - tmpdir=/opt/bitnami/mysql/tmp - max_allowed_packet=32M - bind-address=0.0.0.0 - performance_schema = 1 - performance_schema_instrument = '%memory%=on' - performance_schema_instrument = '%lock%=on' - innodb_monitor_enable=ALL - - [mysql] - no-auto-rehash - - [mysqldump] - quick - max_allowed_packet = 32M - - [client] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - default-character-set=UTF8 - plugin_dir=/opt/bitnami/mysql/plugin - - [manager] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-mysql-init-scripts - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: primary -data: - create_users_grants_core.sql: |- - create - user zyly@'%' identified by 'Cmii@451315'; - grant select on *.* to zyly@'%'; - create - user zyly_qc@'%' identified by 'Uh)E_owCyb16'; - grant all - on *.* to zyly_qc@'%'; - create - user k8s_admin@'%' identified by 'fP#UaH6qQ3)8'; - grant all - on *.* to k8s_admin@'%'; - create - user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH'; - grant all - on *.* to audit_dba@'%'; - create - user db_backup@'%' identified by 
'RU5Pu(4FGdT9'; - GRANT - SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT - on *.* to db_backup@'%'; - create - user monitor@'%' identified by 'PL3#nGtrWbf-'; - grant REPLICATION - CLIENT on *.* to monitor@'%'; - flush - privileges; ---- -kind: Service -apiVersion: v1 -metadata: - name: cmii-mysql - namespace: bjtg - labels: - app.kubernetes.io/component: primary - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.app: mysql - cmii.type: middleware - octopus.control: mysql-db-wdd -spec: - ports: - - name: mysql - protocol: TCP - port: 13306 - targetPort: mysql - selector: - app.kubernetes.io/component: primary - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.app: mysql - cmii.type: middleware - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mysql-headless - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - annotations: -spec: - type: ClusterIP - clusterIP: None - publishNotReadyAddresses: true - ports: - - name: mysql - port: 3306 - targetPort: mysql - selector: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - annotations: -spec: - type: NodePort - ports: - - name: mysql - port: 3306 - protocol: TCP - targetPort: mysql - nodePort: 33306 - selector: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - 
cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - serviceName: helm-mysql - updateStrategy: - type: RollingUpdate - template: - metadata: - annotations: - checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - spec: - serviceAccountName: helm-mysql - affinity: { } - nodeSelector: - mysql-deploy: "true" - securityContext: - fsGroup: 1001 - initContainers: - - name: change-volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:11-debian-11-r136 - imagePullPolicy: "Always" - command: - - /bin/bash - - -ec - - | - chown -R 1001:1001 /bitnami/mysql - securityContext: - runAsUser: 0 - volumeMounts: - - name: mysql-data - mountPath: /bitnami/mysql - containers: - - name: mysql - image: 10.250.0.110:8033/cmii/mysql:8.1.0-debian-11-r42 - imagePullPolicy: "IfNotPresent" - securityContext: - runAsUser: 1001 - env: - - name: BITNAMI_DEBUG - value: "true" - - name: MYSQL_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: helm-mysql - key: mysql-root-password - - name: MYSQL_DATABASE - value: "cmii" - ports: - - name: mysql - containerPort: 3306 - livenessProbe: - failureThreshold: 5 - initialDelaySeconds: 120 - periodSeconds: 10 - successThreshold: 1 - 
timeoutSeconds: 3 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - readinessProbe: - failureThreshold: 5 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 3 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - startupProbe: - failureThreshold: 60 - initialDelaySeconds: 120 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - resources: - limits: { } - requests: { } - volumeMounts: - - name: mysql-data - mountPath: /bitnami/mysql - - name: custom-init-scripts - mountPath: /docker-entrypoint-initdb.d - - name: config - mountPath: /opt/bitnami/mysql/conf/my.cnf - subPath: my.cnf - volumes: - - name: config - configMap: - name: helm-mysql - - name: custom-init-scripts - configMap: - name: helm-mysql-init-scripts - - name: mysql-data - hostPath: - path: /var/lib/docker/mysql-pv ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - annotations: -secrets: - - name: helm-mysql ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -type: Opaque 
-data: - mysql-root-password: "UXpmWFFoZDNiUQ==" - mysql-password: "S0F0cm5PckFKNw==" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: primary -data: - my.cnf: |- - - [mysqld] - port=3306 - basedir=/opt/bitnami/mysql - datadir=/bitnami/mysql/data - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - socket=/opt/bitnami/mysql/tmp/mysql.sock - log-error=/bitnami/mysql/data/error.log - general_log_file = /bitnami/mysql/data/general.log - slow_query_log_file = /bitnami/mysql/data/slow.log - innodb_data_file_path = ibdata1:512M:autoextend - innodb_buffer_pool_size = 512M - innodb_buffer_pool_instances = 2 - innodb_log_file_size = 512M - innodb_log_files_in_group = 4 - innodb_log_files_in_group = 4 - log-bin = /bitnami/mysql/data/mysql-bin - max_binlog_size=1G - transaction_isolation = REPEATABLE-READ - default_storage_engine = innodb - character-set-server = utf8mb4 - collation-server=utf8mb4_bin - binlog_format = ROW - binlog_rows_query_log_events=on - binlog_cache_size=4M - binlog_expire_logs_seconds = 1296000 - max_binlog_cache_size=2G - gtid_mode = on - enforce_gtid_consistency = 1 - sync_binlog = 1 - innodb_flush_log_at_trx_commit = 1 - innodb_flush_method = O_DIRECT - log_slave_updates=1 - relay_log_recovery = 1 - relay-log-purge = 1 - default_time_zone = '+08:00' - lower_case_table_names=1 - log_bin_trust_function_creators=1 - group_concat_max_len=67108864 - innodb_io_capacity = 4000 - innodb_io_capacity_max = 8000 - innodb_flush_sync = 0 - innodb_flush_neighbors = 0 - innodb_write_io_threads = 8 - innodb_read_io_threads = 8 - innodb_purge_threads = 4 - innodb_page_cleaners = 4 - innodb_open_files = 65535 - innodb_max_dirty_pages_pct = 50 - innodb_lru_scan_depth = 4000 - innodb_checksum_algorithm = crc32 - innodb_lock_wait_timeout = 10 - 
innodb_rollback_on_timeout = 1 - innodb_print_all_deadlocks = 1 - innodb_file_per_table = 1 - innodb_online_alter_log_max_size = 4G - innodb_stats_on_metadata = 0 - innodb_thread_concurrency = 0 - innodb_sync_spin_loops = 100 - innodb_spin_wait_delay = 30 - lock_wait_timeout = 3600 - slow_query_log = 1 - long_query_time = 10 - log_queries_not_using_indexes =1 - log_throttle_queries_not_using_indexes = 60 - min_examined_row_limit = 100 - log_slow_admin_statements = 1 - log_slow_slave_statements = 1 - default_authentication_plugin=mysql_native_password - skip-name-resolve=1 - explicit_defaults_for_timestamp=1 - plugin_dir=/opt/bitnami/mysql/plugin - max_allowed_packet=128M - max_connections = 2000 - max_connect_errors = 1000000 - table_definition_cache=2000 - table_open_cache_instances=64 - tablespace_definition_cache=1024 - thread_cache_size=256 - interactive_timeout = 600 - wait_timeout = 600 - tmpdir=/opt/bitnami/mysql/tmp - max_allowed_packet=32M - bind-address=0.0.0.0 - performance_schema = 1 - performance_schema_instrument = '%memory%=on' - performance_schema_instrument = '%lock%=on' - innodb_monitor_enable=ALL - - [mysql] - no-auto-rehash - - [mysqldump] - quick - max_allowed_packet = 32M - - [client] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - default-character-set=UTF8 - plugin_dir=/opt/bitnami/mysql/plugin - - [manager] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-mysql-init-scripts - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: primary -data: - create_users_grants_core.sql: |- - create - user zyly@'%' identified by 'Cmii@451315'; - grant select on *.* to zyly@'%'; - create - user zyly_qc@'%' identified by 'Uh)E_owCyb16'; - grant all - on *.* to zyly_qc@'%'; - create - user 
k8s_admin@'%' identified by 'fP#UaH6qQ3)8'; - grant all - on *.* to k8s_admin@'%'; - create - user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH'; - grant all - on *.* to audit_dba@'%'; - create - user db_backup@'%' identified by 'RU5Pu(4FGdT9'; - GRANT - SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT - on *.* to db_backup@'%'; - create - user monitor@'%' identified by 'PL3#nGtrWbf-'; - grant REPLICATION - CLIENT on *.* to monitor@'%'; - flush - privileges; ---- -kind: Service -apiVersion: v1 -metadata: - name: cmii-mysql - namespace: bjtg - labels: - app.kubernetes.io/component: primary - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.app: mysql - cmii.type: middleware - octopus.control: mysql-db-wdd -spec: - ports: - - name: mysql - protocol: TCP - port: 13306 - targetPort: mysql - selector: - app.kubernetes.io/component: primary - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.app: mysql - cmii.type: middleware - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mysql-headless - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - annotations: -spec: - type: ClusterIP - clusterIP: None - publishNotReadyAddresses: true - ports: - - name: mysql - port: 3306 - targetPort: mysql - selector: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - 
app.kubernetes.io/component: primary - annotations: -spec: - type: NodePort - ports: - - name: mysql - port: 3306 - protocol: TCP - targetPort: mysql - nodePort: 33306 - selector: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - serviceName: helm-mysql - updateStrategy: - type: RollingUpdate - template: - metadata: - annotations: - checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - spec: - serviceAccountName: helm-mysql - affinity: { } - nodeSelector: - mysql-deploy: "true" - securityContext: - fsGroup: 1001 - initContainers: - - name: change-volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:11-debian-11-r136 - imagePullPolicy: "Always" - command: - - /bin/bash - - -ec - - | - chown -R 1001:1001 /bitnami/mysql - securityContext: - runAsUser: 0 - volumeMounts: - - name: mysql-data - mountPath: /bitnami/mysql - containers: - - name: mysql - image: 10.250.0.110:8033/cmii/mysql:8.1.0-debian-11-r42 - imagePullPolicy: "IfNotPresent" - securityContext: - runAsUser: 1001 - env: - - name: BITNAMI_DEBUG - value: "true" - - name: MYSQL_ROOT_PASSWORD - valueFrom: - secretKeyRef: - 
name: helm-mysql - key: mysql-root-password - - name: MYSQL_DATABASE - value: "cmii" - ports: - - name: mysql - containerPort: 3306 - livenessProbe: - failureThreshold: 5 - initialDelaySeconds: 120 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 3 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - readinessProbe: - failureThreshold: 5 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 3 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - startupProbe: - failureThreshold: 60 - initialDelaySeconds: 120 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - resources: - limits: { } - requests: { } - volumeMounts: - - name: mysql-data - mountPath: /bitnami/mysql - - name: custom-init-scripts - mountPath: /docker-entrypoint-initdb.d - - name: config - mountPath: /opt/bitnami/mysql/conf/my.cnf - subPath: my.cnf - volumes: - - name: config - configMap: - name: helm-mysql - - name: custom-init-scripts - configMap: - name: helm-mysql-init-scripts - - name: mysql-data - hostPath: - path: /var/lib/docker/mysql-pv ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - annotations: -secrets: - - name: helm-mysql ---- 
-apiVersion: v1 -kind: Secret -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -type: Opaque -data: - mysql-root-password: "UXpmWFFoZDNiUQ==" - mysql-password: "S0F0cm5PckFKNw==" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: primary -data: - my.cnf: |- - - [mysqld] - port=3306 - basedir=/opt/bitnami/mysql - datadir=/bitnami/mysql/data - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - socket=/opt/bitnami/mysql/tmp/mysql.sock - log-error=/bitnami/mysql/data/error.log - general_log_file = /bitnami/mysql/data/general.log - slow_query_log_file = /bitnami/mysql/data/slow.log - innodb_data_file_path = ibdata1:512M:autoextend - innodb_buffer_pool_size = 512M - innodb_buffer_pool_instances = 2 - innodb_log_file_size = 512M - innodb_log_files_in_group = 4 - innodb_log_files_in_group = 4 - log-bin = /bitnami/mysql/data/mysql-bin - max_binlog_size=1G - transaction_isolation = REPEATABLE-READ - default_storage_engine = innodb - character-set-server = utf8mb4 - collation-server=utf8mb4_bin - binlog_format = ROW - binlog_rows_query_log_events=on - binlog_cache_size=4M - binlog_expire_logs_seconds = 1296000 - max_binlog_cache_size=2G - gtid_mode = on - enforce_gtid_consistency = 1 - sync_binlog = 1 - innodb_flush_log_at_trx_commit = 1 - innodb_flush_method = O_DIRECT - log_slave_updates=1 - relay_log_recovery = 1 - relay-log-purge = 1 - default_time_zone = '+08:00' - lower_case_table_names=1 - log_bin_trust_function_creators=1 - group_concat_max_len=67108864 - innodb_io_capacity = 4000 - innodb_io_capacity_max = 8000 - innodb_flush_sync = 0 - innodb_flush_neighbors = 0 - innodb_write_io_threads = 8 - 
innodb_read_io_threads = 8 - innodb_purge_threads = 4 - innodb_page_cleaners = 4 - innodb_open_files = 65535 - innodb_max_dirty_pages_pct = 50 - innodb_lru_scan_depth = 4000 - innodb_checksum_algorithm = crc32 - innodb_lock_wait_timeout = 10 - innodb_rollback_on_timeout = 1 - innodb_print_all_deadlocks = 1 - innodb_file_per_table = 1 - innodb_online_alter_log_max_size = 4G - innodb_stats_on_metadata = 0 - innodb_thread_concurrency = 0 - innodb_sync_spin_loops = 100 - innodb_spin_wait_delay = 30 - lock_wait_timeout = 3600 - slow_query_log = 1 - long_query_time = 10 - log_queries_not_using_indexes =1 - log_throttle_queries_not_using_indexes = 60 - min_examined_row_limit = 100 - log_slow_admin_statements = 1 - log_slow_slave_statements = 1 - default_authentication_plugin=mysql_native_password - skip-name-resolve=1 - explicit_defaults_for_timestamp=1 - plugin_dir=/opt/bitnami/mysql/plugin - max_allowed_packet=128M - max_connections = 2000 - max_connect_errors = 1000000 - table_definition_cache=2000 - table_open_cache_instances=64 - tablespace_definition_cache=1024 - thread_cache_size=256 - interactive_timeout = 600 - wait_timeout = 600 - tmpdir=/opt/bitnami/mysql/tmp - max_allowed_packet=32M - bind-address=0.0.0.0 - performance_schema = 1 - performance_schema_instrument = '%memory%=on' - performance_schema_instrument = '%lock%=on' - innodb_monitor_enable=ALL - - [mysql] - no-auto-rehash - - [mysqldump] - quick - max_allowed_packet = 32M - - [client] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - default-character-set=UTF8 - plugin_dir=/opt/bitnami/mysql/plugin - - [manager] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-mysql-init-scripts - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: primary 
-data: - create_users_grants_core.sql: |- - create - user zyly@'%' identified by 'Cmii@451315'; - grant select on *.* to zyly@'%'; - create - user zyly_qc@'%' identified by 'Uh)E_owCyb16'; - grant all - on *.* to zyly_qc@'%'; - create - user k8s_admin@'%' identified by 'fP#UaH6qQ3)8'; - grant all - on *.* to k8s_admin@'%'; - create - user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH'; - grant all - on *.* to audit_dba@'%'; - create - user db_backup@'%' identified by 'RU5Pu(4FGdT9'; - GRANT - SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT - on *.* to db_backup@'%'; - create - user monitor@'%' identified by 'PL3#nGtrWbf-'; - grant REPLICATION - CLIENT on *.* to monitor@'%'; - flush - privileges; ---- -kind: Service -apiVersion: v1 -metadata: - name: cmii-mysql - namespace: bjtg - labels: - app.kubernetes.io/component: primary - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.app: mysql - cmii.type: middleware - octopus.control: mysql-db-wdd -spec: - ports: - - name: mysql - protocol: TCP - port: 13306 - targetPort: mysql - selector: - app.kubernetes.io/component: primary - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.app: mysql - cmii.type: middleware - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mysql-headless - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - annotations: -spec: - type: ClusterIP - clusterIP: None - publishNotReadyAddresses: true - ports: - - name: mysql - port: 3306 - targetPort: mysql - selector: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary ---- -apiVersion: v1 -kind: Service -metadata: - name: 
helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - annotations: -spec: - type: NodePort - ports: - - name: mysql - port: 3306 - protocol: TCP - targetPort: mysql - nodePort: 33306 - selector: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - serviceName: helm-mysql - updateStrategy: - type: RollingUpdate - template: - metadata: - annotations: - checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - spec: - serviceAccountName: helm-mysql - affinity: { } - nodeSelector: - mysql-deploy: "true" - securityContext: - fsGroup: 1001 - initContainers: - - name: change-volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:11-debian-11-r136 - imagePullPolicy: "Always" - command: - - /bin/bash - - -ec - - | - chown -R 1001:1001 /bitnami/mysql - securityContext: - runAsUser: 0 - volumeMounts: - - name: mysql-data - mountPath: /bitnami/mysql - containers: - - name: mysql - image: 
10.250.0.110:8033/cmii/mysql:8.1.0-debian-11-r42 - imagePullPolicy: "IfNotPresent" - securityContext: - runAsUser: 1001 - env: - - name: BITNAMI_DEBUG - value: "true" - - name: MYSQL_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: helm-mysql - key: mysql-root-password - - name: MYSQL_DATABASE - value: "cmii" - ports: - - name: mysql - containerPort: 3306 - livenessProbe: - failureThreshold: 5 - initialDelaySeconds: 120 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 3 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - readinessProbe: - failureThreshold: 5 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 3 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - startupProbe: - failureThreshold: 60 - initialDelaySeconds: 120 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - resources: - limits: { } - requests: { } - volumeMounts: - - name: mysql-data - mountPath: /bitnami/mysql - - name: custom-init-scripts - mountPath: /docker-entrypoint-initdb.d - - name: config - mountPath: /opt/bitnami/mysql/conf/my.cnf - subPath: my.cnf - volumes: - - name: config - configMap: - name: helm-mysql - - name: custom-init-scripts - configMap: - name: helm-mysql-init-scripts - - name: mysql-data - hostPath: - path: /var/lib/docker/mysql-pv ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: 
helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - annotations: -secrets: - - name: helm-mysql ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -type: Opaque -data: - mysql-root-password: "UXpmWFFoZDNiUQ==" - mysql-password: "S0F0cm5PckFKNw==" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: primary -data: - my.cnf: |- - - [mysqld] - port=3306 - basedir=/opt/bitnami/mysql - datadir=/bitnami/mysql/data - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - socket=/opt/bitnami/mysql/tmp/mysql.sock - log-error=/bitnami/mysql/data/error.log - general_log_file = /bitnami/mysql/data/general.log - slow_query_log_file = /bitnami/mysql/data/slow.log - innodb_data_file_path = ibdata1:512M:autoextend - innodb_buffer_pool_size = 512M - innodb_buffer_pool_instances = 2 - innodb_log_file_size = 512M - innodb_log_files_in_group = 4 - innodb_log_files_in_group = 4 - log-bin = /bitnami/mysql/data/mysql-bin - max_binlog_size=1G - transaction_isolation = REPEATABLE-READ - default_storage_engine = innodb - character-set-server = utf8mb4 - collation-server=utf8mb4_bin - binlog_format = ROW - binlog_rows_query_log_events=on - binlog_cache_size=4M - binlog_expire_logs_seconds = 1296000 - max_binlog_cache_size=2G - gtid_mode = on - enforce_gtid_consistency = 1 - sync_binlog = 1 - innodb_flush_log_at_trx_commit = 1 - innodb_flush_method = O_DIRECT - log_slave_updates=1 - relay_log_recovery = 1 - relay-log-purge = 1 - default_time_zone = '+08:00' - lower_case_table_names=1 
- log_bin_trust_function_creators=1 - group_concat_max_len=67108864 - innodb_io_capacity = 4000 - innodb_io_capacity_max = 8000 - innodb_flush_sync = 0 - innodb_flush_neighbors = 0 - innodb_write_io_threads = 8 - innodb_read_io_threads = 8 - innodb_purge_threads = 4 - innodb_page_cleaners = 4 - innodb_open_files = 65535 - innodb_max_dirty_pages_pct = 50 - innodb_lru_scan_depth = 4000 - innodb_checksum_algorithm = crc32 - innodb_lock_wait_timeout = 10 - innodb_rollback_on_timeout = 1 - innodb_print_all_deadlocks = 1 - innodb_file_per_table = 1 - innodb_online_alter_log_max_size = 4G - innodb_stats_on_metadata = 0 - innodb_thread_concurrency = 0 - innodb_sync_spin_loops = 100 - innodb_spin_wait_delay = 30 - lock_wait_timeout = 3600 - slow_query_log = 1 - long_query_time = 10 - log_queries_not_using_indexes =1 - log_throttle_queries_not_using_indexes = 60 - min_examined_row_limit = 100 - log_slow_admin_statements = 1 - log_slow_slave_statements = 1 - default_authentication_plugin=mysql_native_password - skip-name-resolve=1 - explicit_defaults_for_timestamp=1 - plugin_dir=/opt/bitnami/mysql/plugin - max_allowed_packet=128M - max_connections = 2000 - max_connect_errors = 1000000 - table_definition_cache=2000 - table_open_cache_instances=64 - tablespace_definition_cache=1024 - thread_cache_size=256 - interactive_timeout = 600 - wait_timeout = 600 - tmpdir=/opt/bitnami/mysql/tmp - max_allowed_packet=32M - bind-address=0.0.0.0 - performance_schema = 1 - performance_schema_instrument = '%memory%=on' - performance_schema_instrument = '%lock%=on' - innodb_monitor_enable=ALL - - [mysql] - no-auto-rehash - - [mysqldump] - quick - max_allowed_packet = 32M - - [client] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - default-character-set=UTF8 - plugin_dir=/opt/bitnami/mysql/plugin - - [manager] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: 
helm-mysql-init-scripts - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: primary -data: - create_users_grants_core.sql: |- - create - user zyly@'%' identified by 'Cmii@451315'; - grant select on *.* to zyly@'%'; - create - user zyly_qc@'%' identified by 'Uh)E_owCyb16'; - grant all - on *.* to zyly_qc@'%'; - create - user k8s_admin@'%' identified by 'fP#UaH6qQ3)8'; - grant all - on *.* to k8s_admin@'%'; - create - user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH'; - grant all - on *.* to audit_dba@'%'; - create - user db_backup@'%' identified by 'RU5Pu(4FGdT9'; - GRANT - SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT - on *.* to db_backup@'%'; - create - user monitor@'%' identified by 'PL3#nGtrWbf-'; - grant REPLICATION - CLIENT on *.* to monitor@'%'; - flush - privileges; ---- -kind: Service -apiVersion: v1 -metadata: - name: cmii-mysql - namespace: bjtg - labels: - app.kubernetes.io/component: primary - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.app: mysql - cmii.type: middleware - octopus.control: mysql-db-wdd -spec: - ports: - - name: mysql - protocol: TCP - port: 13306 - targetPort: mysql - selector: - app.kubernetes.io/component: primary - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.app: mysql - cmii.type: middleware - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mysql-headless - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - annotations: -spec: - type: ClusterIP - clusterIP: None - publishNotReadyAddresses: true - ports: - - name: mysql - port: 3306 - 
targetPort: mysql - selector: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - annotations: -spec: - type: NodePort - ports: - - name: mysql - port: 3306 - protocol: TCP - targetPort: mysql - nodePort: 33306 - selector: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-mysql - namespace: bjtg - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: mysql-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - serviceName: helm-mysql - updateStrategy: - type: RollingUpdate - template: - metadata: - annotations: - checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd - labels: - app.kubernetes.io/name: mysql-db - octopus.control: mysql-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: mysql - app.kubernetes.io/component: primary - spec: - serviceAccountName: helm-mysql - affinity: { } - nodeSelector: - mysql-deploy: "true" - securityContext: - fsGroup: 1001 - initContainers: - - name: change-volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:11-debian-11-r136 - 
imagePullPolicy: "Always" - command: - - /bin/bash - - -ec - - | - chown -R 1001:1001 /bitnami/mysql - securityContext: - runAsUser: 0 - volumeMounts: - - name: mysql-data - mountPath: /bitnami/mysql - containers: - - name: mysql - image: 10.250.0.110:8033/cmii/mysql:8.1.0-debian-11-r42 - imagePullPolicy: "IfNotPresent" - securityContext: - runAsUser: 1001 - env: - - name: BITNAMI_DEBUG - value: "true" - - name: MYSQL_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: helm-mysql - key: mysql-root-password - - name: MYSQL_DATABASE - value: "cmii" - ports: - - name: mysql - containerPort: 3306 - livenessProbe: - failureThreshold: 5 - initialDelaySeconds: 120 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 3 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - readinessProbe: - failureThreshold: 5 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 3 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - startupProbe: - failureThreshold: 60 - initialDelaySeconds: 120 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - exec: - command: - - /bin/bash - - -ec - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then - password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") - fi - mysqladmin status -uroot -p"${password_aux}" - resources: - limits: { } - requests: { } - volumeMounts: - - name: mysql-data - mountPath: /bitnami/mysql - - name: custom-init-scripts - mountPath: /docker-entrypoint-initdb.d - - name: config - mountPath: /opt/bitnami/mysql/conf/my.cnf - subPath: my.cnf - volumes: - - name: config - 
configMap: - name: helm-mysql - - name: custom-init-scripts - configMap: - name: helm-mysql-init-scripts - - name: mysql-data - hostPath: - path: /var/lib/docker/mysql-pv diff --git a/agent-operator/deploy/z_bjtg/k8s-nacos.yaml b/agent-operator/deploy/z_bjtg/k8s-nacos.yaml deleted file mode 100755 index a415b09..0000000 --- a/agent-operator/deploy/z_bjtg/k8s-nacos.yaml +++ /dev/null @@ -1,756 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-nacos-cm - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -data: - mysql.db.name: "cmii_nacos_config" - mysql.db.host: "helm-mysql" - mysql.port: "3306" - mysql.user: "k8s_admin" - mysql.password: "fP#UaH6qQ3)8" ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-nacos - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -spec: - type: NodePort - selector: - cmii.app: helm-nacos - cmii.type: middleware - ports: - - port: 8848 - name: server - targetPort: 8848 - nodePort: 38989 - - port: 9848 - name: server12 - targetPort: 9848 - nodePort: 38912 - - port: 9849 - name: server23 - targetPort: 9849 - nodePort: 38923 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-nacos - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -spec: - serviceName: helm-nacos - replicas: 1 - selector: - matchLabels: - cmii.app: helm-nacos - cmii.type: middleware - template: - metadata: - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/version: 5.5.0 - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - affinity: { } - containers: 
- - name: nacos-server - image: 10.250.0.110:8033/cmii/nacos-server:v2.1.2 - ports: - - containerPort: 8848 - name: dashboard - env: - - name: NACOS_AUTH_ENABLE - value: "false" - - name: NACOS_REPLICAS - value: "1" - - name: MYSQL_SERVICE_DB_NAME - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.db.name - - name: MYSQL_SERVICE_PORT - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.port - - name: MYSQL_SERVICE_USER - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.user - - name: MYSQL_SERVICE_PASSWORD - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.password - - name: MYSQL_SERVICE_HOST - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.db.host - - name: NACOS_SERVER_PORT - value: "8848" - - name: NACOS_APPLICATION_PORT - value: "8848" - - name: PREFER_HOST_MODE - value: "hostname" - - name: MODE - value: standalone - - name: SPRING_DATASOURCE_PLATFORM - value: mysql ---- ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-nacos-cm - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -data: - mysql.db.name: "cmii_nacos_config" - mysql.db.host: "helm-mysql" - mysql.port: "3306" - mysql.user: "k8s_admin" - mysql.password: "fP#UaH6qQ3)8" ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-nacos - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -spec: - type: NodePort - selector: - cmii.app: helm-nacos - cmii.type: middleware - ports: - - port: 8848 - name: server - targetPort: 8848 - nodePort: 38989 - - port: 9848 - name: server12 - targetPort: 9848 - nodePort: 38912 - - port: 9849 - name: server23 - targetPort: 9849 - nodePort: 38923 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-nacos - namespace: 
bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -spec: - serviceName: helm-nacos - replicas: 1 - selector: - matchLabels: - cmii.app: helm-nacos - cmii.type: middleware - template: - metadata: - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/version: 5.5.0 - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - affinity: { } - containers: - - name: nacos-server - image: 10.250.0.110:8033/cmii/nacos-server:v2.1.2 - ports: - - containerPort: 8848 - name: dashboard - env: - - name: NACOS_AUTH_ENABLE - value: "false" - - name: NACOS_REPLICAS - value: "1" - - name: MYSQL_SERVICE_DB_NAME - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.db.name - - name: MYSQL_SERVICE_PORT - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.port - - name: MYSQL_SERVICE_USER - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.user - - name: MYSQL_SERVICE_PASSWORD - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.password - - name: MYSQL_SERVICE_HOST - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.db.host - - name: NACOS_SERVER_PORT - value: "8848" - - name: NACOS_APPLICATION_PORT - value: "8848" - - name: PREFER_HOST_MODE - value: "hostname" - - name: MODE - value: standalone - - name: SPRING_DATASOURCE_PLATFORM - value: mysql ---- ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-nacos-cm - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -data: - mysql.db.name: "cmii_nacos_config" - mysql.db.host: "helm-mysql" - mysql.port: "3306" - mysql.user: "k8s_admin" - mysql.password: "fP#UaH6qQ3)8" ---- -apiVersion: v1 -kind: Service -metadata: - name: 
helm-nacos - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -spec: - type: NodePort - selector: - cmii.app: helm-nacos - cmii.type: middleware - ports: - - port: 8848 - name: server - targetPort: 8848 - nodePort: 38989 - - port: 9848 - name: server12 - targetPort: 9848 - nodePort: 38912 - - port: 9849 - name: server23 - targetPort: 9849 - nodePort: 38923 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-nacos - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -spec: - serviceName: helm-nacos - replicas: 1 - selector: - matchLabels: - cmii.app: helm-nacos - cmii.type: middleware - template: - metadata: - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/version: 5.5.0 - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - affinity: { } - containers: - - name: nacos-server - image: 10.250.0.110:8033/cmii/nacos-server:v2.1.2 - ports: - - containerPort: 8848 - name: dashboard - env: - - name: NACOS_AUTH_ENABLE - value: "false" - - name: NACOS_REPLICAS - value: "1" - - name: MYSQL_SERVICE_DB_NAME - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.db.name - - name: MYSQL_SERVICE_PORT - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.port - - name: MYSQL_SERVICE_USER - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.user - - name: MYSQL_SERVICE_PASSWORD - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.password - - name: MYSQL_SERVICE_HOST - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.db.host - - name: NACOS_SERVER_PORT - value: "8848" - - name: NACOS_APPLICATION_PORT - value: "8848" - - name: PREFER_HOST_MODE - 
value: "hostname" - - name: MODE - value: standalone - - name: SPRING_DATASOURCE_PLATFORM - value: mysql ---- ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-nacos-cm - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -data: - mysql.db.name: "cmii_nacos_config" - mysql.db.host: "helm-mysql" - mysql.port: "3306" - mysql.user: "k8s_admin" - mysql.password: "fP#UaH6qQ3)8" ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-nacos - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -spec: - type: NodePort - selector: - cmii.app: helm-nacos - cmii.type: middleware - ports: - - port: 8848 - name: server - targetPort: 8848 - nodePort: 38989 - - port: 9848 - name: server12 - targetPort: 9848 - nodePort: 38912 - - port: 9849 - name: server23 - targetPort: 9849 - nodePort: 38923 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-nacos - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -spec: - serviceName: helm-nacos - replicas: 1 - selector: - matchLabels: - cmii.app: helm-nacos - cmii.type: middleware - template: - metadata: - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/version: 5.5.0 - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - affinity: { } - containers: - - name: nacos-server - image: 10.250.0.110:8033/cmii/nacos-server:v2.1.2 - ports: - - containerPort: 8848 - name: dashboard - env: - - name: NACOS_AUTH_ENABLE - value: "false" - - name: NACOS_REPLICAS - value: "1" - - name: MYSQL_SERVICE_DB_NAME - valueFrom: - configMapKeyRef: - name: 
helm-nacos-cm - key: mysql.db.name - - name: MYSQL_SERVICE_PORT - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.port - - name: MYSQL_SERVICE_USER - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.user - - name: MYSQL_SERVICE_PASSWORD - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.password - - name: MYSQL_SERVICE_HOST - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.db.host - - name: NACOS_SERVER_PORT - value: "8848" - - name: NACOS_APPLICATION_PORT - value: "8848" - - name: PREFER_HOST_MODE - value: "hostname" - - name: MODE - value: standalone - - name: SPRING_DATASOURCE_PLATFORM - value: mysql ---- ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-nacos-cm - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -data: - mysql.db.name: "cmii_nacos_config" - mysql.db.host: "helm-mysql" - mysql.port: "3306" - mysql.user: "k8s_admin" - mysql.password: "fP#UaH6qQ3)8" ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-nacos - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -spec: - type: NodePort - selector: - cmii.app: helm-nacos - cmii.type: middleware - ports: - - port: 8848 - name: server - targetPort: 8848 - nodePort: 38989 - - port: 9848 - name: server12 - targetPort: 9848 - nodePort: 38912 - - port: 9849 - name: server23 - targetPort: 9849 - nodePort: 38923 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-nacos - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -spec: - serviceName: helm-nacos - replicas: 1 - selector: - matchLabels: - cmii.app: helm-nacos - cmii.type: middleware - 
template: - metadata: - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/version: 5.5.0 - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - affinity: { } - containers: - - name: nacos-server - image: 10.250.0.110:8033/cmii/nacos-server:v2.1.2 - ports: - - containerPort: 8848 - name: dashboard - env: - - name: NACOS_AUTH_ENABLE - value: "false" - - name: NACOS_REPLICAS - value: "1" - - name: MYSQL_SERVICE_DB_NAME - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.db.name - - name: MYSQL_SERVICE_PORT - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.port - - name: MYSQL_SERVICE_USER - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.user - - name: MYSQL_SERVICE_PASSWORD - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.password - - name: MYSQL_SERVICE_HOST - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.db.host - - name: NACOS_SERVER_PORT - value: "8848" - - name: NACOS_APPLICATION_PORT - value: "8848" - - name: PREFER_HOST_MODE - value: "hostname" - - name: MODE - value: standalone - - name: SPRING_DATASOURCE_PLATFORM - value: mysql ---- ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-nacos-cm - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -data: - mysql.db.name: "cmii_nacos_config" - mysql.db.host: "helm-mysql" - mysql.port: "3306" - mysql.user: "k8s_admin" - mysql.password: "fP#UaH6qQ3)8" ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-nacos - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -spec: - type: NodePort - selector: - cmii.app: helm-nacos - cmii.type: middleware - ports: - - port: 
8848 - name: server - targetPort: 8848 - nodePort: 38989 - - port: 9848 - name: server12 - targetPort: 9848 - nodePort: 38912 - - port: 9849 - name: server23 - targetPort: 9849 - nodePort: 38923 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-nacos - namespace: bjtg - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/version: 5.5.0 -spec: - serviceName: helm-nacos - replicas: 1 - selector: - matchLabels: - cmii.app: helm-nacos - cmii.type: middleware - template: - metadata: - labels: - cmii.app: helm-nacos - cmii.type: middleware - octopus.control: nacos-wdd - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/version: 5.5.0 - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - affinity: { } - containers: - - name: nacos-server - image: 10.250.0.110:8033/cmii/nacos-server:v2.1.2 - ports: - - containerPort: 8848 - name: dashboard - env: - - name: NACOS_AUTH_ENABLE - value: "false" - - name: NACOS_REPLICAS - value: "1" - - name: MYSQL_SERVICE_DB_NAME - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.db.name - - name: MYSQL_SERVICE_PORT - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.port - - name: MYSQL_SERVICE_USER - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.user - - name: MYSQL_SERVICE_PASSWORD - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.password - - name: MYSQL_SERVICE_HOST - valueFrom: - configMapKeyRef: - name: helm-nacos-cm - key: mysql.db.host - - name: NACOS_SERVER_PORT - value: "8848" - - name: NACOS_APPLICATION_PORT - value: "8848" - - name: PREFER_HOST_MODE - value: "hostname" - - name: MODE - value: standalone - - name: SPRING_DATASOURCE_PLATFORM - value: mysql ---- diff --git a/agent-operator/deploy/z_bjtg/k8s-nfs-test.yaml b/agent-operator/deploy/z_bjtg/k8s-nfs-test.yaml deleted file mode 100755 index 5ff83c4..0000000 --- 
a/agent-operator/deploy/z_bjtg/k8s-nfs-test.yaml +++ /dev/null @@ -1,216 +0,0 @@ ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: test-claim - annotations: - volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" #与nfs-StorageClass.yaml metadata.name保持一致 -spec: - accessModes: - - ReadWriteOnce - storageClassName: nfs-prod-distribute - resources: - requests: - storage: 1Mi ---- -kind: Pod -apiVersion: v1 -metadata: - name: test-pod -spec: - containers: - - name: test-pod - image: 10.250.0.110:8033/cmii/busybox - command: - - "/bin/sh" - args: - - "-c" - - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出 - volumeMounts: - - name: nfs-pvc - mountPath: "/mnt" - restartPolicy: "Never" - volumes: - - name: nfs-pvc - persistentVolumeClaim: - claimName: test-claim #与PVC名称保持一致 ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: test-claim - annotations: - volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" #与nfs-StorageClass.yaml metadata.name保持一致 -spec: - accessModes: - - ReadWriteOnce - storageClassName: nfs-prod-distribute - resources: - requests: - storage: 1Mi ---- -kind: Pod -apiVersion: v1 -metadata: - name: test-pod -spec: - containers: - - name: test-pod - image: 10.250.0.110:8033/cmii/busybox - command: - - "/bin/sh" - args: - - "-c" - - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出 - volumeMounts: - - name: nfs-pvc - mountPath: "/mnt" - restartPolicy: "Never" - volumes: - - name: nfs-pvc - persistentVolumeClaim: - claimName: test-claim #与PVC名称保持一致 ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: test-claim - annotations: - volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" #与nfs-StorageClass.yaml metadata.name保持一致 -spec: - accessModes: - - ReadWriteOnce - storageClassName: nfs-prod-distribute - resources: - requests: - storage: 1Mi ---- -kind: Pod -apiVersion: v1 -metadata: - name: test-pod -spec: - containers: - - name: test-pod - 
image: 10.250.0.110:8033/cmii/busybox - command: - - "/bin/sh" - args: - - "-c" - - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出 - volumeMounts: - - name: nfs-pvc - mountPath: "/mnt" - restartPolicy: "Never" - volumes: - - name: nfs-pvc - persistentVolumeClaim: - claimName: test-claim #与PVC名称保持一致 ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: test-claim - annotations: - volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" #与nfs-StorageClass.yaml metadata.name保持一致 -spec: - accessModes: - - ReadWriteOnce - storageClassName: nfs-prod-distribute - resources: - requests: - storage: 1Mi ---- -kind: Pod -apiVersion: v1 -metadata: - name: test-pod -spec: - containers: - - name: test-pod - image: 10.250.0.110:8033/cmii/busybox - command: - - "/bin/sh" - args: - - "-c" - - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出 - volumeMounts: - - name: nfs-pvc - mountPath: "/mnt" - restartPolicy: "Never" - volumes: - - name: nfs-pvc - persistentVolumeClaim: - claimName: test-claim #与PVC名称保持一致 ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: test-claim - annotations: - volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" #与nfs-StorageClass.yaml metadata.name保持一致 -spec: - accessModes: - - ReadWriteOnce - storageClassName: nfs-prod-distribute - resources: - requests: - storage: 1Mi ---- -kind: Pod -apiVersion: v1 -metadata: - name: test-pod -spec: - containers: - - name: test-pod - image: 10.250.0.110:8033/cmii/busybox - command: - - "/bin/sh" - args: - - "-c" - - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出 - volumeMounts: - - name: nfs-pvc - mountPath: "/mnt" - restartPolicy: "Never" - volumes: - - name: nfs-pvc - persistentVolumeClaim: - claimName: test-claim #与PVC名称保持一致 ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: test-claim - annotations: - volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" 
#与nfs-StorageClass.yaml metadata.name保持一致 -spec: - accessModes: - - ReadWriteOnce - storageClassName: nfs-prod-distribute - resources: - requests: - storage: 1Mi ---- -kind: Pod -apiVersion: v1 -metadata: - name: test-pod -spec: - containers: - - name: test-pod - image: 10.250.0.110:8033/cmii/busybox - command: - - "/bin/sh" - args: - - "-c" - - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出 - volumeMounts: - - name: nfs-pvc - mountPath: "/mnt" - restartPolicy: "Never" - volumes: - - name: nfs-pvc - persistentVolumeClaim: - claimName: test-claim #与PVC名称保持一致 diff --git a/agent-operator/deploy/z_bjtg/k8s-nfs.yaml b/agent-operator/deploy/z_bjtg/k8s-nfs.yaml deleted file mode 100755 index 2a8f10e..0000000 --- a/agent-operator/deploy/z_bjtg/k8s-nfs.yaml +++ /dev/null @@ -1,672 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system #根据实际环境设定namespace,下面类同 ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: nfs-client-provisioner-runner -rules: - - apiGroups: [ "" ] - resources: [ "persistentvolumes" ] - verbs: [ "get", "list", "watch", "create", "delete" ] - - apiGroups: [ "" ] - resources: [ "persistentvolumeclaims" ] - verbs: [ "get", "list", "watch", "update" ] - - apiGroups: [ "storage.k8s.io" ] - resources: [ "storageclasses" ] - verbs: [ "get", "list", "watch" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create", "update", "patch" ] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: run-nfs-client-provisioner -subjects: - - kind: ServiceAccount - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -roleRef: - kind: ClusterRole - # name: nfs-client-provisioner-runner - name: cluster-admin - apiGroup: rbac.authorization.k8s.io ---- -kind: Role -apiVersion: 
rbac.authorization.k8s.io/v1 -metadata: - name: leader-locking-nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get", "list", "watch", "create", "update", "patch" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: leader-locking-nfs-client-provisioner -subjects: - - kind: ServiceAccount - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -roleRef: - kind: Role - name: leader-locking-nfs-client-provisioner - apiGroup: rbac.authorization.k8s.io - ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: nfs-prod-distribute -provisioner: cmlc-nfs-storage #这里的名称要和provisioner配置文件中的环境变量PROVISIONER_NAME保持一致parameters: archiveOnDelete: "false" ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nfs-client-provisioner - labels: - app: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system #与RBAC文件中的namespace保持一致 -spec: - replicas: 1 - selector: - matchLabels: - app: nfs-client-provisioner - strategy: - type: Recreate - template: - metadata: - labels: - app: nfs-client-provisioner - spec: - serviceAccountName: nfs-client-provisioner - containers: - - name: nfs-client-provisioner - image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2 - volumeMounts: - - name: nfs-client-root - mountPath: /persistentvolumes - env: - - name: PROVISIONER_NAME - value: cmlc-nfs-storage - - name: NFS_SERVER - value: 10.250.0.110 - - name: NFS_PATH - value: /var/lib/docker/nfs_data - volumes: - - name: nfs-client-root - nfs: - server: 10.250.0.110 - path: /var/lib/docker/nfs_data ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system #根据实际环境设定namespace,下面类同 ---- -kind: 
ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: nfs-client-provisioner-runner -rules: - - apiGroups: [ "" ] - resources: [ "persistentvolumes" ] - verbs: [ "get", "list", "watch", "create", "delete" ] - - apiGroups: [ "" ] - resources: [ "persistentvolumeclaims" ] - verbs: [ "get", "list", "watch", "update" ] - - apiGroups: [ "storage.k8s.io" ] - resources: [ "storageclasses" ] - verbs: [ "get", "list", "watch" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create", "update", "patch" ] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: run-nfs-client-provisioner -subjects: - - kind: ServiceAccount - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -roleRef: - kind: ClusterRole - # name: nfs-client-provisioner-runner - name: cluster-admin - apiGroup: rbac.authorization.k8s.io ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: leader-locking-nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get", "list", "watch", "create", "update", "patch" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: leader-locking-nfs-client-provisioner -subjects: - - kind: ServiceAccount - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -roleRef: - kind: Role - name: leader-locking-nfs-client-provisioner - apiGroup: rbac.authorization.k8s.io - ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: nfs-prod-distribute -provisioner: cmlc-nfs-storage #这里的名称要和provisioner配置文件中的环境变量PROVISIONER_NAME保持一致parameters: archiveOnDelete: "false" ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nfs-client-provisioner - labels: - app: nfs-client-provisioner - # replace with namespace 
where provisioner is deployed - namespace: kube-system #与RBAC文件中的namespace保持一致 -spec: - replicas: 1 - selector: - matchLabels: - app: nfs-client-provisioner - strategy: - type: Recreate - template: - metadata: - labels: - app: nfs-client-provisioner - spec: - serviceAccountName: nfs-client-provisioner - containers: - - name: nfs-client-provisioner - image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2 - volumeMounts: - - name: nfs-client-root - mountPath: /persistentvolumes - env: - - name: PROVISIONER_NAME - value: cmlc-nfs-storage - - name: NFS_SERVER - value: 10.250.0.110 - - name: NFS_PATH - value: /var/lib/docker/nfs_data - volumes: - - name: nfs-client-root - nfs: - server: 10.250.0.110 - path: /var/lib/docker/nfs_data ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system #根据实际环境设定namespace,下面类同 ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: nfs-client-provisioner-runner -rules: - - apiGroups: [ "" ] - resources: [ "persistentvolumes" ] - verbs: [ "get", "list", "watch", "create", "delete" ] - - apiGroups: [ "" ] - resources: [ "persistentvolumeclaims" ] - verbs: [ "get", "list", "watch", "update" ] - - apiGroups: [ "storage.k8s.io" ] - resources: [ "storageclasses" ] - verbs: [ "get", "list", "watch" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create", "update", "patch" ] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: run-nfs-client-provisioner -subjects: - - kind: ServiceAccount - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -roleRef: - kind: ClusterRole - # name: nfs-client-provisioner-runner - name: cluster-admin - apiGroup: rbac.authorization.k8s.io ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: leader-locking-nfs-client-provisioner 
- # replace with namespace where provisioner is deployed - namespace: kube-system -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get", "list", "watch", "create", "update", "patch" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: leader-locking-nfs-client-provisioner -subjects: - - kind: ServiceAccount - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -roleRef: - kind: Role - name: leader-locking-nfs-client-provisioner - apiGroup: rbac.authorization.k8s.io - ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: nfs-prod-distribute -provisioner: cmlc-nfs-storage #这里的名称要和provisioner配置文件中的环境变量PROVISIONER_NAME保持一致parameters: archiveOnDelete: "false" ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nfs-client-provisioner - labels: - app: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system #与RBAC文件中的namespace保持一致 -spec: - replicas: 1 - selector: - matchLabels: - app: nfs-client-provisioner - strategy: - type: Recreate - template: - metadata: - labels: - app: nfs-client-provisioner - spec: - serviceAccountName: nfs-client-provisioner - containers: - - name: nfs-client-provisioner - image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2 - volumeMounts: - - name: nfs-client-root - mountPath: /persistentvolumes - env: - - name: PROVISIONER_NAME - value: cmlc-nfs-storage - - name: NFS_SERVER - value: 10.250.0.110 - - name: NFS_PATH - value: /var/lib/docker/nfs_data - volumes: - - name: nfs-client-root - nfs: - server: 10.250.0.110 - path: /var/lib/docker/nfs_data ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system #根据实际环境设定namespace,下面类同 ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: 
nfs-client-provisioner-runner -rules: - - apiGroups: [ "" ] - resources: [ "persistentvolumes" ] - verbs: [ "get", "list", "watch", "create", "delete" ] - - apiGroups: [ "" ] - resources: [ "persistentvolumeclaims" ] - verbs: [ "get", "list", "watch", "update" ] - - apiGroups: [ "storage.k8s.io" ] - resources: [ "storageclasses" ] - verbs: [ "get", "list", "watch" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create", "update", "patch" ] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: run-nfs-client-provisioner -subjects: - - kind: ServiceAccount - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -roleRef: - kind: ClusterRole - # name: nfs-client-provisioner-runner - name: cluster-admin - apiGroup: rbac.authorization.k8s.io ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: leader-locking-nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get", "list", "watch", "create", "update", "patch" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: leader-locking-nfs-client-provisioner -subjects: - - kind: ServiceAccount - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -roleRef: - kind: Role - name: leader-locking-nfs-client-provisioner - apiGroup: rbac.authorization.k8s.io - ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: nfs-prod-distribute -provisioner: cmlc-nfs-storage #这里的名称要和provisioner配置文件中的环境变量PROVISIONER_NAME保持一致parameters: archiveOnDelete: "false" ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nfs-client-provisioner - labels: - app: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system 
#与RBAC文件中的namespace保持一致 -spec: - replicas: 1 - selector: - matchLabels: - app: nfs-client-provisioner - strategy: - type: Recreate - template: - metadata: - labels: - app: nfs-client-provisioner - spec: - serviceAccountName: nfs-client-provisioner - containers: - - name: nfs-client-provisioner - image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2 - volumeMounts: - - name: nfs-client-root - mountPath: /persistentvolumes - env: - - name: PROVISIONER_NAME - value: cmlc-nfs-storage - - name: NFS_SERVER - value: 10.250.0.110 - - name: NFS_PATH - value: /var/lib/docker/nfs_data - volumes: - - name: nfs-client-root - nfs: - server: 10.250.0.110 - path: /var/lib/docker/nfs_data ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system #根据实际环境设定namespace,下面类同 ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: nfs-client-provisioner-runner -rules: - - apiGroups: [ "" ] - resources: [ "persistentvolumes" ] - verbs: [ "get", "list", "watch", "create", "delete" ] - - apiGroups: [ "" ] - resources: [ "persistentvolumeclaims" ] - verbs: [ "get", "list", "watch", "update" ] - - apiGroups: [ "storage.k8s.io" ] - resources: [ "storageclasses" ] - verbs: [ "get", "list", "watch" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create", "update", "patch" ] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: run-nfs-client-provisioner -subjects: - - kind: ServiceAccount - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -roleRef: - kind: ClusterRole - # name: nfs-client-provisioner-runner - name: cluster-admin - apiGroup: rbac.authorization.k8s.io ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: leader-locking-nfs-client-provisioner - # replace with namespace where provisioner is 
deployed - namespace: kube-system -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get", "list", "watch", "create", "update", "patch" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: leader-locking-nfs-client-provisioner -subjects: - - kind: ServiceAccount - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -roleRef: - kind: Role - name: leader-locking-nfs-client-provisioner - apiGroup: rbac.authorization.k8s.io - ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: nfs-prod-distribute -provisioner: cmlc-nfs-storage #这里的名称要和provisioner配置文件中的环境变量PROVISIONER_NAME保持一致parameters: archiveOnDelete: "false" ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nfs-client-provisioner - labels: - app: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system #与RBAC文件中的namespace保持一致 -spec: - replicas: 1 - selector: - matchLabels: - app: nfs-client-provisioner - strategy: - type: Recreate - template: - metadata: - labels: - app: nfs-client-provisioner - spec: - serviceAccountName: nfs-client-provisioner - containers: - - name: nfs-client-provisioner - image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2 - volumeMounts: - - name: nfs-client-root - mountPath: /persistentvolumes - env: - - name: PROVISIONER_NAME - value: cmlc-nfs-storage - - name: NFS_SERVER - value: 10.250.0.110 - - name: NFS_PATH - value: /var/lib/docker/nfs_data - volumes: - - name: nfs-client-root - nfs: - server: 10.250.0.110 - path: /var/lib/docker/nfs_data ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system #根据实际环境设定namespace,下面类同 ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: nfs-client-provisioner-runner -rules: - - apiGroups: [ "" ] - 
resources: [ "persistentvolumes" ] - verbs: [ "get", "list", "watch", "create", "delete" ] - - apiGroups: [ "" ] - resources: [ "persistentvolumeclaims" ] - verbs: [ "get", "list", "watch", "update" ] - - apiGroups: [ "storage.k8s.io" ] - resources: [ "storageclasses" ] - verbs: [ "get", "list", "watch" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create", "update", "patch" ] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: run-nfs-client-provisioner -subjects: - - kind: ServiceAccount - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -roleRef: - kind: ClusterRole - # name: nfs-client-provisioner-runner - name: cluster-admin - apiGroup: rbac.authorization.k8s.io ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: leader-locking-nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get", "list", "watch", "create", "update", "patch" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: leader-locking-nfs-client-provisioner -subjects: - - kind: ServiceAccount - name: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system -roleRef: - kind: Role - name: leader-locking-nfs-client-provisioner - apiGroup: rbac.authorization.k8s.io - ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: nfs-prod-distribute -provisioner: cmlc-nfs-storage #这里的名称要和provisioner配置文件中的环境变量PROVISIONER_NAME保持一致parameters: archiveOnDelete: "false" ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nfs-client-provisioner - labels: - app: nfs-client-provisioner - # replace with namespace where provisioner is deployed - namespace: kube-system #与RBAC文件中的namespace保持一致 -spec: - replicas: 1 - selector: - matchLabels: - app: 
nfs-client-provisioner - strategy: - type: Recreate - template: - metadata: - labels: - app: nfs-client-provisioner - spec: - serviceAccountName: nfs-client-provisioner - containers: - - name: nfs-client-provisioner - image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2 - volumeMounts: - - name: nfs-client-root - mountPath: /persistentvolumes - env: - - name: PROVISIONER_NAME - value: cmlc-nfs-storage - - name: NFS_SERVER - value: 10.250.0.110 - - name: NFS_PATH - value: /var/lib/docker/nfs_data - volumes: - - name: nfs-client-root - nfs: - server: 10.250.0.110 - path: /var/lib/docker/nfs_data diff --git a/agent-operator/deploy/z_bjtg/k8s-pvc.yaml b/agent-operator/deploy/z_bjtg/k8s-pvc.yaml deleted file mode 100755 index 916b9eb..0000000 --- a/agent-operator/deploy/z_bjtg/k8s-pvc.yaml +++ /dev/null @@ -1,456 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-backend-log-pvc - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: nfs-backend-log-pvc - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 100Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-emqxs - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 20Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-mongo - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: 
Filesystem - resources: - requests: - storage: 30Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-rabbitmq - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 20Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-backend-log-pvc - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: nfs-backend-log-pvc - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 100Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-emqxs - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 20Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-mongo - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 30Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-rabbitmq - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - 
volumeMode: Filesystem - resources: - requests: - storage: 20Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-backend-log-pvc - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: nfs-backend-log-pvc - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 100Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-emqxs - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 20Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-mongo - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 30Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-rabbitmq - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 20Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-backend-log-pvc - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: nfs-backend-log-pvc - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - 
ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 100Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-emqxs - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 20Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-mongo - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 30Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-rabbitmq - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 20Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-backend-log-pvc - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: nfs-backend-log-pvc - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 100Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-emqxs - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - 
ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 20Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-mongo - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 30Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-rabbitmq - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 20Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-backend-log-pvc - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: nfs-backend-log-pvc - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 100Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-emqxs - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-emqxs - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 20Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-mongo - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-mongo - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - 
ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 30Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - cmii.type: middleware-base - cmii.app: helm-rabbitmq - helm.sh/chart: all-persistence-volume-claims-1.1.0 - app.kubernetes.io/version: 5.5.0 -spec: - storageClassName: nfs-prod-distribute - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 20Gi diff --git a/agent-operator/deploy/z_bjtg/k8s-rabbitmq.yaml b/agent-operator/deploy/z_bjtg/k8s-rabbitmq.yaml deleted file mode 100755 index 975e4b5..0000000 --- a/agent-operator/deploy/z_bjtg/k8s-rabbitmq.yaml +++ /dev/null @@ -1,3924 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -automountServiceAccountToken: true -secrets: - - name: helm-rabbitmq ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -type: Opaque -data: - rabbitmq-password: "blljUk45MXIuX2hq" - rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-rabbitmq-config - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -data: - rabbitmq.conf: |- - ## Username and password - ## - default_user = admin - default_pass = nYcRN91r._hj - ## Clustering - ## - cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s - cluster_formation.k8s.host = kubernetes.default.svc.cluster.local - cluster_formation.node_cleanup.interval = 10 - 
cluster_formation.node_cleanup.only_log_warning = true - cluster_partition_handling = autoheal - # queue master locator - queue_master_locator = min-masters - # enable guest user - loopback_users.guest = false - #default_vhost = default-vhost - #disk_free_limit.absolute = 50MB - #load_definitions = /app/load_definition.json ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -subjects: - - kind: ServiceAccount - name: helm-rabbitmq -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: helm-rabbitmq-endpoint-reader ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq-headless - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - clusterIP: None - ports: - - name: epmd - port: 4369 - targetPort: epmd - - name: amqp - port: 5672 - targetPort: amqp - - name: dist - port: 25672 - targetPort: dist - - name: dashboard - port: 15672 - targetPort: stats - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - publishNotReadyAddresses: true ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - 
app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - type: NodePort - ports: - - name: amqp - port: 5672 - targetPort: amqp - nodePort: 35672 - - name: dashboard - port: 15672 - targetPort: dashboard - nodePort: 35675 - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - serviceName: helm-rabbitmq-headless - podManagementPolicy: OrderedReady - replicas: 1 - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - template: - metadata: - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq - annotations: - checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 - checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f - spec: - - serviceAccountName: helm-rabbitmq - affinity: { } - securityContext: - fsGroup: 5001 - runAsUser: 5001 - terminationGracePeriodSeconds: 120 - initContainers: - - name: volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:10-debian-10-r140 - imagePullPolicy: "Always" - command: - - /bin/bash - args: - - -ec - - | - mkdir -p "/bitnami/rabbitmq/mnesia" - chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" - securityContext: - runAsUser: 0 - resources: - limits: { } - requests: { } - volumeMounts: - - name: data - mountPath: /bitnami/rabbitmq/mnesia - containers: - - name: rabbitmq - image: 10.250.0.110:8033/cmii/rabbitmq:3.9.12-debian-10-r3 - imagePullPolicy: "Always" - env: - - name: BITNAMI_DEBUG - value: "false" - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: 
status.podIP - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: K8S_SERVICE_NAME - value: "helm-rabbitmq-headless" - - name: K8S_ADDRESS_TYPE - value: hostname - - name: RABBITMQ_FORCE_BOOT - value: "no" - - name: RABBITMQ_NODE_NAME - value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: K8S_HOSTNAME_SUFFIX - value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: RABBITMQ_MNESIA_DIR - value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" - - name: RABBITMQ_LDAP_ENABLE - value: "no" - - name: RABBITMQ_LOGS - value: "-" - - name: RABBITMQ_ULIMIT_NOFILES - value: "65536" - - name: RABBITMQ_USE_LONGNAME - value: "true" - - name: RABBITMQ_ERL_COOKIE - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-erlang-cookie - - name: RABBITMQ_LOAD_DEFINITIONS - value: "no" - - name: RABBITMQ_SECURE_PASSWORD - value: "yes" - - name: RABBITMQ_USERNAME - value: "admin" - - name: RABBITMQ_PASSWORD - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-password - - name: RABBITMQ_PLUGINS - value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" - ports: - - name: amqp - containerPort: 5672 - - name: dist - containerPort: 25672 - - name: dashboard - containerPort: 15672 - - name: epmd - containerPort: 4369 - livenessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q ping - initialDelaySeconds: 120 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms - initialDelaySeconds: 10 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - lifecycle: - preStop: - exec: - command: - 
- /bin/bash - - -ec - - | - if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then - /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" - else - rabbitmqctl stop_app - fi - resources: - limits: { } - requests: { } - volumeMounts: - - name: configuration - mountPath: /bitnami/rabbitmq/conf - - name: data - mountPath: /bitnami/rabbitmq/mnesia - volumes: - - name: configuration - configMap: - name: helm-rabbitmq-config - items: - - key: rabbitmq.conf - path: rabbitmq.conf - - name: data - persistentVolumeClaim: - claimName: helm-rabbitmq ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -automountServiceAccountToken: true -secrets: - - name: helm-rabbitmq ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -type: Opaque -data: - rabbitmq-password: "blljUk45MXIuX2hq" - rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-rabbitmq-config - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -data: - rabbitmq.conf: |- - ## Username and password - ## - default_user = admin - default_pass = nYcRN91r._hj - ## Clustering - ## - cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s - cluster_formation.k8s.host = kubernetes.default.svc.cluster.local - cluster_formation.node_cleanup.interval = 10 - cluster_formation.node_cleanup.only_log_warning = true - cluster_partition_handling = autoheal - # queue master locator - queue_master_locator = min-masters - 
# enable guest user - loopback_users.guest = false - #default_vhost = default-vhost - #disk_free_limit.absolute = 50MB - #load_definitions = /app/load_definition.json ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -subjects: - - kind: ServiceAccount - name: helm-rabbitmq -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: helm-rabbitmq-endpoint-reader ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq-headless - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - clusterIP: None - ports: - - name: epmd - port: 4369 - targetPort: epmd - - name: amqp - port: 5672 - targetPort: amqp - - name: dist - port: 25672 - targetPort: dist - - name: dashboard - port: 15672 - targetPort: stats - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - publishNotReadyAddresses: true ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - type: NodePort - ports: - - name: amqp - port: 5672 - targetPort: amqp - nodePort: 35672 - - 
name: dashboard - port: 15672 - targetPort: dashboard - nodePort: 35675 - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - serviceName: helm-rabbitmq-headless - podManagementPolicy: OrderedReady - replicas: 1 - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - template: - metadata: - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq - annotations: - checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 - checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f - spec: - - serviceAccountName: helm-rabbitmq - affinity: { } - securityContext: - fsGroup: 5001 - runAsUser: 5001 - terminationGracePeriodSeconds: 120 - initContainers: - - name: volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:10-debian-10-r140 - imagePullPolicy: "Always" - command: - - /bin/bash - args: - - -ec - - | - mkdir -p "/bitnami/rabbitmq/mnesia" - chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" - securityContext: - runAsUser: 0 - resources: - limits: { } - requests: { } - volumeMounts: - - name: data - mountPath: /bitnami/rabbitmq/mnesia - containers: - - name: rabbitmq - image: 10.250.0.110:8033/cmii/rabbitmq:3.9.12-debian-10-r3 - imagePullPolicy: "Always" - env: - - name: BITNAMI_DEBUG - value: "false" - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: 
K8S_SERVICE_NAME - value: "helm-rabbitmq-headless" - - name: K8S_ADDRESS_TYPE - value: hostname - - name: RABBITMQ_FORCE_BOOT - value: "no" - - name: RABBITMQ_NODE_NAME - value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: K8S_HOSTNAME_SUFFIX - value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: RABBITMQ_MNESIA_DIR - value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" - - name: RABBITMQ_LDAP_ENABLE - value: "no" - - name: RABBITMQ_LOGS - value: "-" - - name: RABBITMQ_ULIMIT_NOFILES - value: "65536" - - name: RABBITMQ_USE_LONGNAME - value: "true" - - name: RABBITMQ_ERL_COOKIE - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-erlang-cookie - - name: RABBITMQ_LOAD_DEFINITIONS - value: "no" - - name: RABBITMQ_SECURE_PASSWORD - value: "yes" - - name: RABBITMQ_USERNAME - value: "admin" - - name: RABBITMQ_PASSWORD - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-password - - name: RABBITMQ_PLUGINS - value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" - ports: - - name: amqp - containerPort: 5672 - - name: dist - containerPort: 25672 - - name: dashboard - containerPort: 15672 - - name: epmd - containerPort: 4369 - livenessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q ping - initialDelaySeconds: 120 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms - initialDelaySeconds: 10 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - lifecycle: - preStop: - exec: - command: - - /bin/bash - - -ec - - | - if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then - /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" - else - rabbitmqctl 
stop_app - fi - resources: - limits: { } - requests: { } - volumeMounts: - - name: configuration - mountPath: /bitnami/rabbitmq/conf - - name: data - mountPath: /bitnami/rabbitmq/mnesia - volumes: - - name: configuration - configMap: - name: helm-rabbitmq-config - items: - - key: rabbitmq.conf - path: rabbitmq.conf - - name: data - persistentVolumeClaim: - claimName: helm-rabbitmq ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -automountServiceAccountToken: true -secrets: - - name: helm-rabbitmq ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -type: Opaque -data: - rabbitmq-password: "blljUk45MXIuX2hq" - rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-rabbitmq-config - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -data: - rabbitmq.conf: |- - ## Username and password - ## - default_user = admin - default_pass = nYcRN91r._hj - ## Clustering - ## - cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s - cluster_formation.k8s.host = kubernetes.default.svc.cluster.local - cluster_formation.node_cleanup.interval = 10 - cluster_formation.node_cleanup.only_log_warning = true - cluster_partition_handling = autoheal - # queue master locator - queue_master_locator = min-masters - # enable guest user - loopback_users.guest = false - #default_vhost = default-vhost - #disk_free_limit.absolute = 50MB - #load_definitions = /app/load_definition.json ---- -kind: 
Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -subjects: - - kind: ServiceAccount - name: helm-rabbitmq -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: helm-rabbitmq-endpoint-reader ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq-headless - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - clusterIP: None - ports: - - name: epmd - port: 4369 - targetPort: epmd - - name: amqp - port: 5672 - targetPort: amqp - - name: dist - port: 25672 - targetPort: dist - - name: dashboard - port: 15672 - targetPort: stats - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - publishNotReadyAddresses: true ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - type: NodePort - ports: - - name: amqp - port: 5672 - targetPort: amqp - nodePort: 35672 - - name: dashboard - port: 15672 - targetPort: dashboard - nodePort: 35675 - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg ---- -apiVersion: 
apps/v1 -kind: StatefulSet -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - serviceName: helm-rabbitmq-headless - podManagementPolicy: OrderedReady - replicas: 1 - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - template: - metadata: - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq - annotations: - checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 - checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f - spec: - - serviceAccountName: helm-rabbitmq - affinity: { } - securityContext: - fsGroup: 5001 - runAsUser: 5001 - terminationGracePeriodSeconds: 120 - initContainers: - - name: volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:10-debian-10-r140 - imagePullPolicy: "Always" - command: - - /bin/bash - args: - - -ec - - | - mkdir -p "/bitnami/rabbitmq/mnesia" - chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" - securityContext: - runAsUser: 0 - resources: - limits: { } - requests: { } - volumeMounts: - - name: data - mountPath: /bitnami/rabbitmq/mnesia - containers: - - name: rabbitmq - image: 10.250.0.110:8033/cmii/rabbitmq:3.9.12-debian-10-r3 - imagePullPolicy: "Always" - env: - - name: BITNAMI_DEBUG - value: "false" - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: K8S_SERVICE_NAME - value: "helm-rabbitmq-headless" - - name: K8S_ADDRESS_TYPE - value: hostname - - name: RABBITMQ_FORCE_BOOT - value: "no" - - name: RABBITMQ_NODE_NAME - value: 
"rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: K8S_HOSTNAME_SUFFIX - value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: RABBITMQ_MNESIA_DIR - value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" - - name: RABBITMQ_LDAP_ENABLE - value: "no" - - name: RABBITMQ_LOGS - value: "-" - - name: RABBITMQ_ULIMIT_NOFILES - value: "65536" - - name: RABBITMQ_USE_LONGNAME - value: "true" - - name: RABBITMQ_ERL_COOKIE - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-erlang-cookie - - name: RABBITMQ_LOAD_DEFINITIONS - value: "no" - - name: RABBITMQ_SECURE_PASSWORD - value: "yes" - - name: RABBITMQ_USERNAME - value: "admin" - - name: RABBITMQ_PASSWORD - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-password - - name: RABBITMQ_PLUGINS - value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" - ports: - - name: amqp - containerPort: 5672 - - name: dist - containerPort: 25672 - - name: dashboard - containerPort: 15672 - - name: epmd - containerPort: 4369 - livenessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q ping - initialDelaySeconds: 120 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms - initialDelaySeconds: 10 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - lifecycle: - preStop: - exec: - command: - - /bin/bash - - -ec - - | - if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then - /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" - else - rabbitmqctl stop_app - fi - resources: - limits: { } - requests: { } - volumeMounts: - - name: configuration - mountPath: /bitnami/rabbitmq/conf - - name: data - mountPath: 
/bitnami/rabbitmq/mnesia - volumes: - - name: configuration - configMap: - name: helm-rabbitmq-config - items: - - key: rabbitmq.conf - path: rabbitmq.conf - - name: data - persistentVolumeClaim: - claimName: helm-rabbitmq ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -automountServiceAccountToken: true -secrets: - - name: helm-rabbitmq ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -type: Opaque -data: - rabbitmq-password: "blljUk45MXIuX2hq" - rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-rabbitmq-config - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -data: - rabbitmq.conf: |- - ## Username and password - ## - default_user = admin - default_pass = nYcRN91r._hj - ## Clustering - ## - cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s - cluster_formation.k8s.host = kubernetes.default.svc.cluster.local - cluster_formation.node_cleanup.interval = 10 - cluster_formation.node_cleanup.only_log_warning = true - cluster_partition_handling = autoheal - # queue master locator - queue_master_locator = min-masters - # enable guest user - loopback_users.guest = false - #default_vhost = default-vhost - #disk_free_limit.absolute = 50MB - #load_definitions = /app/load_definition.json ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq 
- helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -subjects: - - kind: ServiceAccount - name: helm-rabbitmq -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: helm-rabbitmq-endpoint-reader ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq-headless - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - clusterIP: None - ports: - - name: epmd - port: 4369 - targetPort: epmd - - name: amqp - port: 5672 - targetPort: amqp - - name: dist - port: 25672 - targetPort: dist - - name: dashboard - port: 15672 - targetPort: stats - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - publishNotReadyAddresses: true ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - type: NodePort - ports: - - name: amqp - port: 5672 - targetPort: amqp - nodePort: 35672 - - name: dashboard - port: 15672 - targetPort: dashboard - nodePort: 35675 - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - 
app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - serviceName: helm-rabbitmq-headless - podManagementPolicy: OrderedReady - replicas: 1 - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - template: - metadata: - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq - annotations: - checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 - checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f - spec: - - serviceAccountName: helm-rabbitmq - affinity: { } - securityContext: - fsGroup: 5001 - runAsUser: 5001 - terminationGracePeriodSeconds: 120 - initContainers: - - name: volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:10-debian-10-r140 - imagePullPolicy: "Always" - command: - - /bin/bash - args: - - -ec - - | - mkdir -p "/bitnami/rabbitmq/mnesia" - chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" - securityContext: - runAsUser: 0 - resources: - limits: { } - requests: { } - volumeMounts: - - name: data - mountPath: /bitnami/rabbitmq/mnesia - containers: - - name: rabbitmq - image: 10.250.0.110:8033/cmii/rabbitmq:3.9.12-debian-10-r3 - imagePullPolicy: "Always" - env: - - name: BITNAMI_DEBUG - value: "false" - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: K8S_SERVICE_NAME - value: "helm-rabbitmq-headless" - - name: K8S_ADDRESS_TYPE - value: hostname - - name: RABBITMQ_FORCE_BOOT - value: "no" - - name: RABBITMQ_NODE_NAME - value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: K8S_HOSTNAME_SUFFIX - value: 
".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: RABBITMQ_MNESIA_DIR - value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" - - name: RABBITMQ_LDAP_ENABLE - value: "no" - - name: RABBITMQ_LOGS - value: "-" - - name: RABBITMQ_ULIMIT_NOFILES - value: "65536" - - name: RABBITMQ_USE_LONGNAME - value: "true" - - name: RABBITMQ_ERL_COOKIE - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-erlang-cookie - - name: RABBITMQ_LOAD_DEFINITIONS - value: "no" - - name: RABBITMQ_SECURE_PASSWORD - value: "yes" - - name: RABBITMQ_USERNAME - value: "admin" - - name: RABBITMQ_PASSWORD - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-password - - name: RABBITMQ_PLUGINS - value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" - ports: - - name: amqp - containerPort: 5672 - - name: dist - containerPort: 25672 - - name: dashboard - containerPort: 15672 - - name: epmd - containerPort: 4369 - livenessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q ping - initialDelaySeconds: 120 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms - initialDelaySeconds: 10 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - lifecycle: - preStop: - exec: - command: - - /bin/bash - - -ec - - | - if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then - /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" - else - rabbitmqctl stop_app - fi - resources: - limits: { } - requests: { } - volumeMounts: - - name: configuration - mountPath: /bitnami/rabbitmq/conf - - name: data - mountPath: /bitnami/rabbitmq/mnesia - volumes: - - name: configuration - configMap: - name: helm-rabbitmq-config - items: - - key: rabbitmq.conf - 
path: rabbitmq.conf - - name: data - persistentVolumeClaim: - claimName: helm-rabbitmq ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -automountServiceAccountToken: true -secrets: - - name: helm-rabbitmq ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -type: Opaque -data: - rabbitmq-password: "blljUk45MXIuX2hq" - rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-rabbitmq-config - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -data: - rabbitmq.conf: |- - ## Username and password - ## - default_user = admin - default_pass = nYcRN91r._hj - ## Clustering - ## - cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s - cluster_formation.k8s.host = kubernetes.default.svc.cluster.local - cluster_formation.node_cleanup.interval = 10 - cluster_formation.node_cleanup.only_log_warning = true - cluster_partition_handling = autoheal - # queue master locator - queue_master_locator = min-masters - # enable guest user - loopback_users.guest = false - #default_vhost = default-vhost - #disk_free_limit.absolute = 50MB - #load_definitions = /app/load_definition.json ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -rules: - - apiGroups: [ "" 
] - resources: [ "endpoints" ] - verbs: [ "get" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -subjects: - - kind: ServiceAccount - name: helm-rabbitmq -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: helm-rabbitmq-endpoint-reader ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq-headless - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - clusterIP: None - ports: - - name: epmd - port: 4369 - targetPort: epmd - - name: amqp - port: 5672 - targetPort: amqp - - name: dist - port: 25672 - targetPort: dist - - name: dashboard - port: 15672 - targetPort: stats - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - publishNotReadyAddresses: true ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - type: NodePort - ports: - - name: amqp - port: 5672 - targetPort: amqp - nodePort: 35672 - - name: dashboard - port: 15672 - targetPort: dashboard - nodePort: 35675 - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - serviceName: helm-rabbitmq-headless - podManagementPolicy: 
OrderedReady - replicas: 1 - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - template: - metadata: - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq - annotations: - checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 - checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f - spec: - - serviceAccountName: helm-rabbitmq - affinity: { } - securityContext: - fsGroup: 5001 - runAsUser: 5001 - terminationGracePeriodSeconds: 120 - initContainers: - - name: volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:10-debian-10-r140 - imagePullPolicy: "Always" - command: - - /bin/bash - args: - - -ec - - | - mkdir -p "/bitnami/rabbitmq/mnesia" - chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" - securityContext: - runAsUser: 0 - resources: - limits: { } - requests: { } - volumeMounts: - - name: data - mountPath: /bitnami/rabbitmq/mnesia - containers: - - name: rabbitmq - image: 10.250.0.110:8033/cmii/rabbitmq:3.9.12-debian-10-r3 - imagePullPolicy: "Always" - env: - - name: BITNAMI_DEBUG - value: "false" - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: K8S_SERVICE_NAME - value: "helm-rabbitmq-headless" - - name: K8S_ADDRESS_TYPE - value: hostname - - name: RABBITMQ_FORCE_BOOT - value: "no" - - name: RABBITMQ_NODE_NAME - value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: K8S_HOSTNAME_SUFFIX - value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: RABBITMQ_MNESIA_DIR - value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" - - name: RABBITMQ_LDAP_ENABLE - 
value: "no" - - name: RABBITMQ_LOGS - value: "-" - - name: RABBITMQ_ULIMIT_NOFILES - value: "65536" - - name: RABBITMQ_USE_LONGNAME - value: "true" - - name: RABBITMQ_ERL_COOKIE - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-erlang-cookie - - name: RABBITMQ_LOAD_DEFINITIONS - value: "no" - - name: RABBITMQ_SECURE_PASSWORD - value: "yes" - - name: RABBITMQ_USERNAME - value: "admin" - - name: RABBITMQ_PASSWORD - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-password - - name: RABBITMQ_PLUGINS - value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" - ports: - - name: amqp - containerPort: 5672 - - name: dist - containerPort: 25672 - - name: dashboard - containerPort: 15672 - - name: epmd - containerPort: 4369 - livenessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q ping - initialDelaySeconds: 120 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms - initialDelaySeconds: 10 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - lifecycle: - preStop: - exec: - command: - - /bin/bash - - -ec - - | - if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then - /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" - else - rabbitmqctl stop_app - fi - resources: - limits: { } - requests: { } - volumeMounts: - - name: configuration - mountPath: /bitnami/rabbitmq/conf - - name: data - mountPath: /bitnami/rabbitmq/mnesia - volumes: - - name: configuration - configMap: - name: helm-rabbitmq-config - items: - - key: rabbitmq.conf - path: rabbitmq.conf - - name: data - persistentVolumeClaim: - claimName: helm-rabbitmq ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-rabbitmq - namespace: bjtg - 
labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -automountServiceAccountToken: true -secrets: - - name: helm-rabbitmq ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -type: Opaque -data: - rabbitmq-password: "blljUk45MXIuX2hq" - rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-rabbitmq-config - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -data: - rabbitmq.conf: |- - ## Username and password - ## - default_user = admin - default_pass = nYcRN91r._hj - ## Clustering - ## - cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s - cluster_formation.k8s.host = kubernetes.default.svc.cluster.local - cluster_formation.node_cleanup.interval = 10 - cluster_formation.node_cleanup.only_log_warning = true - cluster_partition_handling = autoheal - # queue master locator - queue_master_locator = min-masters - # enable guest user - loopback_users.guest = false - #default_vhost = default-vhost - #disk_free_limit.absolute = 50MB - #load_definitions = /app/load_definition.json ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create" ] ---- -kind: RoleBinding -apiVersion: 
rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -subjects: - - kind: ServiceAccount - name: helm-rabbitmq -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: helm-rabbitmq-endpoint-reader ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq-headless - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - clusterIP: None - ports: - - name: epmd - port: 4369 - targetPort: epmd - - name: amqp - port: 5672 - targetPort: amqp - - name: dist - port: 25672 - targetPort: dist - - name: dashboard - port: 15672 - targetPort: stats - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - publishNotReadyAddresses: true ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - type: NodePort - ports: - - name: amqp - port: 5672 - targetPort: amqp - nodePort: 35672 - - name: dashboard - port: 15672 - targetPort: dashboard - nodePort: 35675 - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - serviceName: helm-rabbitmq-headless - podManagementPolicy: OrderedReady - replicas: 1 - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: helm-rabbitmq - 
app.kubernetes.io/release: bjtg - template: - metadata: - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq - annotations: - checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 - checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f - spec: - - serviceAccountName: helm-rabbitmq - affinity: { } - securityContext: - fsGroup: 5001 - runAsUser: 5001 - terminationGracePeriodSeconds: 120 - initContainers: - - name: volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:10-debian-10-r140 - imagePullPolicy: "Always" - command: - - /bin/bash - args: - - -ec - - | - mkdir -p "/bitnami/rabbitmq/mnesia" - chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" - securityContext: - runAsUser: 0 - resources: - limits: { } - requests: { } - volumeMounts: - - name: data - mountPath: /bitnami/rabbitmq/mnesia - containers: - - name: rabbitmq - image: 10.250.0.110:8033/cmii/rabbitmq:3.9.12-debian-10-r3 - imagePullPolicy: "Always" - env: - - name: BITNAMI_DEBUG - value: "false" - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: K8S_SERVICE_NAME - value: "helm-rabbitmq-headless" - - name: K8S_ADDRESS_TYPE - value: hostname - - name: RABBITMQ_FORCE_BOOT - value: "no" - - name: RABBITMQ_NODE_NAME - value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: K8S_HOSTNAME_SUFFIX - value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: RABBITMQ_MNESIA_DIR - value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" - - name: RABBITMQ_LDAP_ENABLE - value: "no" - - name: RABBITMQ_LOGS - value: "-" - - name: RABBITMQ_ULIMIT_NOFILES - value: "65536" - - name: RABBITMQ_USE_LONGNAME - value: 
"true" - - name: RABBITMQ_ERL_COOKIE - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-erlang-cookie - - name: RABBITMQ_LOAD_DEFINITIONS - value: "no" - - name: RABBITMQ_SECURE_PASSWORD - value: "yes" - - name: RABBITMQ_USERNAME - value: "admin" - - name: RABBITMQ_PASSWORD - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-password - - name: RABBITMQ_PLUGINS - value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" - ports: - - name: amqp - containerPort: 5672 - - name: dist - containerPort: 25672 - - name: dashboard - containerPort: 15672 - - name: epmd - containerPort: 4369 - livenessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q ping - initialDelaySeconds: 120 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms - initialDelaySeconds: 10 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - lifecycle: - preStop: - exec: - command: - - /bin/bash - - -ec - - | - if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then - /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" - else - rabbitmqctl stop_app - fi - resources: - limits: { } - requests: { } - volumeMounts: - - name: configuration - mountPath: /bitnami/rabbitmq/conf - - name: data - mountPath: /bitnami/rabbitmq/mnesia - volumes: - - name: configuration - configMap: - name: helm-rabbitmq-config - items: - - key: rabbitmq.conf - path: rabbitmq.conf - - name: data - persistentVolumeClaim: - claimName: helm-rabbitmq ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - 
app.kubernetes.io/managed-by: rabbitmq -automountServiceAccountToken: true -secrets: - - name: helm-rabbitmq ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -type: Opaque -data: - rabbitmq-password: "blljUk45MXIuX2hq" - rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-rabbitmq-config - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -data: - rabbitmq.conf: |- - ## Username and password - ## - default_user = admin - default_pass = nYcRN91r._hj - ## Clustering - ## - cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s - cluster_formation.k8s.host = kubernetes.default.svc.cluster.local - cluster_formation.node_cleanup.interval = 10 - cluster_formation.node_cleanup.only_log_warning = true - cluster_partition_handling = autoheal - # queue master locator - queue_master_locator = min-masters - # enable guest user - loopback_users.guest = false - #default_vhost = default-vhost - #disk_free_limit.absolute = 50MB - #load_definitions = /app/load_definition.json ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: 
helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -subjects: - - kind: ServiceAccount - name: helm-rabbitmq -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: helm-rabbitmq-endpoint-reader ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq-headless - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - clusterIP: None - ports: - - name: epmd - port: 4369 - targetPort: epmd - - name: amqp - port: 5672 - targetPort: amqp - - name: dist - port: 25672 - targetPort: dist - - name: dashboard - port: 15672 - targetPort: stats - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - publishNotReadyAddresses: true ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - type: NodePort - ports: - - name: amqp - port: 5672 - targetPort: amqp - nodePort: 35672 - - name: dashboard - port: 15672 - targetPort: dashboard - nodePort: 35675 - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - serviceName: helm-rabbitmq-headless - podManagementPolicy: OrderedReady - replicas: 1 - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - template: - metadata: - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - 
app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq - annotations: - checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 - checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f - spec: - - serviceAccountName: helm-rabbitmq - affinity: { } - securityContext: - fsGroup: 5001 - runAsUser: 5001 - terminationGracePeriodSeconds: 120 - initContainers: - - name: volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:10-debian-10-r140 - imagePullPolicy: "Always" - command: - - /bin/bash - args: - - -ec - - | - mkdir -p "/bitnami/rabbitmq/mnesia" - chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" - securityContext: - runAsUser: 0 - resources: - limits: { } - requests: { } - volumeMounts: - - name: data - mountPath: /bitnami/rabbitmq/mnesia - containers: - - name: rabbitmq - image: 10.250.0.110:8033/cmii/rabbitmq:3.9.12-debian-10-r3 - imagePullPolicy: "Always" - env: - - name: BITNAMI_DEBUG - value: "false" - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: K8S_SERVICE_NAME - value: "helm-rabbitmq-headless" - - name: K8S_ADDRESS_TYPE - value: hostname - - name: RABBITMQ_FORCE_BOOT - value: "no" - - name: RABBITMQ_NODE_NAME - value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: K8S_HOSTNAME_SUFFIX - value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: RABBITMQ_MNESIA_DIR - value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" - - name: RABBITMQ_LDAP_ENABLE - value: "no" - - name: RABBITMQ_LOGS - value: "-" - - name: RABBITMQ_ULIMIT_NOFILES - value: "65536" - - name: RABBITMQ_USE_LONGNAME - value: "true" - - name: RABBITMQ_ERL_COOKIE - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-erlang-cookie - - name: 
RABBITMQ_LOAD_DEFINITIONS - value: "no" - - name: RABBITMQ_SECURE_PASSWORD - value: "yes" - - name: RABBITMQ_USERNAME - value: "admin" - - name: RABBITMQ_PASSWORD - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-password - - name: RABBITMQ_PLUGINS - value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" - ports: - - name: amqp - containerPort: 5672 - - name: dist - containerPort: 25672 - - name: dashboard - containerPort: 15672 - - name: epmd - containerPort: 4369 - livenessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q ping - initialDelaySeconds: 120 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms - initialDelaySeconds: 10 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - lifecycle: - preStop: - exec: - command: - - /bin/bash - - -ec - - | - if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then - /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" - else - rabbitmqctl stop_app - fi - resources: - limits: { } - requests: { } - volumeMounts: - - name: configuration - mountPath: /bitnami/rabbitmq/conf - - name: data - mountPath: /bitnami/rabbitmq/mnesia - volumes: - - name: configuration - configMap: - name: helm-rabbitmq-config - items: - - key: rabbitmq.conf - path: rabbitmq.conf - - name: data - persistentVolumeClaim: - claimName: helm-rabbitmq ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -automountServiceAccountToken: true -secrets: - - name: helm-rabbitmq ---- -apiVersion: v1 -kind: Secret 
-metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -type: Opaque -data: - rabbitmq-password: "blljUk45MXIuX2hq" - rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-rabbitmq-config - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -data: - rabbitmq.conf: |- - ## Username and password - ## - default_user = admin - default_pass = nYcRN91r._hj - ## Clustering - ## - cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s - cluster_formation.k8s.host = kubernetes.default.svc.cluster.local - cluster_formation.node_cleanup.interval = 10 - cluster_formation.node_cleanup.only_log_warning = true - cluster_partition_handling = autoheal - # queue master locator - queue_master_locator = min-masters - # enable guest user - loopback_users.guest = false - #default_vhost = default-vhost - #disk_free_limit.absolute = 50MB - #load_definitions = /app/load_definition.json ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -subjects: - - kind: 
ServiceAccount - name: helm-rabbitmq -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: helm-rabbitmq-endpoint-reader ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq-headless - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - clusterIP: None - ports: - - name: epmd - port: 4369 - targetPort: epmd - - name: amqp - port: 5672 - targetPort: amqp - - name: dist - port: 25672 - targetPort: dist - - name: dashboard - port: 15672 - targetPort: stats - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - publishNotReadyAddresses: true ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - type: NodePort - ports: - - name: amqp - port: 5672 - targetPort: amqp - nodePort: 35672 - - name: dashboard - port: 15672 - targetPort: dashboard - nodePort: 35675 - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - serviceName: helm-rabbitmq-headless - podManagementPolicy: OrderedReady - replicas: 1 - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - template: - metadata: - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq - annotations: - checksum/config: 
d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 - checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f - spec: - - serviceAccountName: helm-rabbitmq - affinity: { } - securityContext: - fsGroup: 5001 - runAsUser: 5001 - terminationGracePeriodSeconds: 120 - initContainers: - - name: volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:10-debian-10-r140 - imagePullPolicy: "Always" - command: - - /bin/bash - args: - - -ec - - | - mkdir -p "/bitnami/rabbitmq/mnesia" - chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" - securityContext: - runAsUser: 0 - resources: - limits: { } - requests: { } - volumeMounts: - - name: data - mountPath: /bitnami/rabbitmq/mnesia - containers: - - name: rabbitmq - image: 10.250.0.110:8033/cmii/rabbitmq:3.9.12-debian-10-r3 - imagePullPolicy: "Always" - env: - - name: BITNAMI_DEBUG - value: "false" - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: K8S_SERVICE_NAME - value: "helm-rabbitmq-headless" - - name: K8S_ADDRESS_TYPE - value: hostname - - name: RABBITMQ_FORCE_BOOT - value: "no" - - name: RABBITMQ_NODE_NAME - value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: K8S_HOSTNAME_SUFFIX - value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: RABBITMQ_MNESIA_DIR - value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" - - name: RABBITMQ_LDAP_ENABLE - value: "no" - - name: RABBITMQ_LOGS - value: "-" - - name: RABBITMQ_ULIMIT_NOFILES - value: "65536" - - name: RABBITMQ_USE_LONGNAME - value: "true" - - name: RABBITMQ_ERL_COOKIE - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-erlang-cookie - - name: RABBITMQ_LOAD_DEFINITIONS - value: "no" - - name: RABBITMQ_SECURE_PASSWORD - value: "yes" - - name: RABBITMQ_USERNAME - 
value: "admin" - - name: RABBITMQ_PASSWORD - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-password - - name: RABBITMQ_PLUGINS - value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" - ports: - - name: amqp - containerPort: 5672 - - name: dist - containerPort: 25672 - - name: dashboard - containerPort: 15672 - - name: epmd - containerPort: 4369 - livenessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q ping - initialDelaySeconds: 120 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms - initialDelaySeconds: 10 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - lifecycle: - preStop: - exec: - command: - - /bin/bash - - -ec - - | - if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then - /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" - else - rabbitmqctl stop_app - fi - resources: - limits: { } - requests: { } - volumeMounts: - - name: configuration - mountPath: /bitnami/rabbitmq/conf - - name: data - mountPath: /bitnami/rabbitmq/mnesia - volumes: - - name: configuration - configMap: - name: helm-rabbitmq-config - items: - - key: rabbitmq.conf - path: rabbitmq.conf - - name: data - persistentVolumeClaim: - claimName: helm-rabbitmq ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -automountServiceAccountToken: true -secrets: - - name: helm-rabbitmq ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: 
rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -type: Opaque -data: - rabbitmq-password: "blljUk45MXIuX2hq" - rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-rabbitmq-config - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -data: - rabbitmq.conf: |- - ## Username and password - ## - default_user = admin - default_pass = nYcRN91r._hj - ## Clustering - ## - cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s - cluster_formation.k8s.host = kubernetes.default.svc.cluster.local - cluster_formation.node_cleanup.interval = 10 - cluster_formation.node_cleanup.only_log_warning = true - cluster_partition_handling = autoheal - # queue master locator - queue_master_locator = min-masters - # enable guest user - loopback_users.guest = false - #default_vhost = default-vhost - #disk_free_limit.absolute = 50MB - #load_definitions = /app/load_definition.json ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -subjects: - - kind: ServiceAccount - name: helm-rabbitmq -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: 
helm-rabbitmq-endpoint-reader ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq-headless - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - clusterIP: None - ports: - - name: epmd - port: 4369 - targetPort: epmd - - name: amqp - port: 5672 - targetPort: amqp - - name: dist - port: 25672 - targetPort: dist - - name: dashboard - port: 15672 - targetPort: stats - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - publishNotReadyAddresses: true ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - type: NodePort - ports: - - name: amqp - port: 5672 - targetPort: amqp - nodePort: 35672 - - name: dashboard - port: 15672 - targetPort: dashboard - nodePort: 35675 - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - serviceName: helm-rabbitmq-headless - podManagementPolicy: OrderedReady - replicas: 1 - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - template: - metadata: - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq - annotations: - checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 - checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f - 
spec: - - serviceAccountName: helm-rabbitmq - affinity: { } - securityContext: - fsGroup: 5001 - runAsUser: 5001 - terminationGracePeriodSeconds: 120 - initContainers: - - name: volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:10-debian-10-r140 - imagePullPolicy: "Always" - command: - - /bin/bash - args: - - -ec - - | - mkdir -p "/bitnami/rabbitmq/mnesia" - chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" - securityContext: - runAsUser: 0 - resources: - limits: { } - requests: { } - volumeMounts: - - name: data - mountPath: /bitnami/rabbitmq/mnesia - containers: - - name: rabbitmq - image: 10.250.0.110:8033/cmii/rabbitmq:3.9.12-debian-10-r3 - imagePullPolicy: "Always" - env: - - name: BITNAMI_DEBUG - value: "false" - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: K8S_SERVICE_NAME - value: "helm-rabbitmq-headless" - - name: K8S_ADDRESS_TYPE - value: hostname - - name: RABBITMQ_FORCE_BOOT - value: "no" - - name: RABBITMQ_NODE_NAME - value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: K8S_HOSTNAME_SUFFIX - value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: RABBITMQ_MNESIA_DIR - value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" - - name: RABBITMQ_LDAP_ENABLE - value: "no" - - name: RABBITMQ_LOGS - value: "-" - - name: RABBITMQ_ULIMIT_NOFILES - value: "65536" - - name: RABBITMQ_USE_LONGNAME - value: "true" - - name: RABBITMQ_ERL_COOKIE - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-erlang-cookie - - name: RABBITMQ_LOAD_DEFINITIONS - value: "no" - - name: RABBITMQ_SECURE_PASSWORD - value: "yes" - - name: RABBITMQ_USERNAME - value: "admin" - - name: RABBITMQ_PASSWORD - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-password - - name: RABBITMQ_PLUGINS - 
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" - ports: - - name: amqp - containerPort: 5672 - - name: dist - containerPort: 25672 - - name: dashboard - containerPort: 15672 - - name: epmd - containerPort: 4369 - livenessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q ping - initialDelaySeconds: 120 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms - initialDelaySeconds: 10 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - lifecycle: - preStop: - exec: - command: - - /bin/bash - - -ec - - | - if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then - /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" - else - rabbitmqctl stop_app - fi - resources: - limits: { } - requests: { } - volumeMounts: - - name: configuration - mountPath: /bitnami/rabbitmq/conf - - name: data - mountPath: /bitnami/rabbitmq/mnesia - volumes: - - name: configuration - configMap: - name: helm-rabbitmq-config - items: - - key: rabbitmq.conf - path: rabbitmq.conf - - name: data - persistentVolumeClaim: - claimName: helm-rabbitmq ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -automountServiceAccountToken: true -secrets: - - name: helm-rabbitmq ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -type: Opaque -data: - rabbitmq-password: "blljUk45MXIuX2hq" - 
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-rabbitmq-config - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -data: - rabbitmq.conf: |- - ## Username and password - ## - default_user = admin - default_pass = nYcRN91r._hj - ## Clustering - ## - cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s - cluster_formation.k8s.host = kubernetes.default.svc.cluster.local - cluster_formation.node_cleanup.interval = 10 - cluster_formation.node_cleanup.only_log_warning = true - cluster_partition_handling = autoheal - # queue master locator - queue_master_locator = min-masters - # enable guest user - loopback_users.guest = false - #default_vhost = default-vhost - #disk_free_limit.absolute = 50MB - #load_definitions = /app/load_definition.json ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -subjects: - - kind: ServiceAccount - name: helm-rabbitmq -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: helm-rabbitmq-endpoint-reader ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq-headless - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - 
helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - clusterIP: None - ports: - - name: epmd - port: 4369 - targetPort: epmd - - name: amqp - port: 5672 - targetPort: amqp - - name: dist - port: 25672 - targetPort: dist - - name: dashboard - port: 15672 - targetPort: stats - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - publishNotReadyAddresses: true ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - type: NodePort - ports: - - name: amqp - port: 5672 - targetPort: amqp - nodePort: 35672 - - name: dashboard - port: 15672 - targetPort: dashboard - nodePort: 35675 - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - serviceName: helm-rabbitmq-headless - podManagementPolicy: OrderedReady - replicas: 1 - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - template: - metadata: - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq - annotations: - checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 - checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f - spec: - - serviceAccountName: helm-rabbitmq - affinity: { } - securityContext: - fsGroup: 5001 - runAsUser: 5001 - terminationGracePeriodSeconds: 120 - initContainers: - - name: 
volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:10-debian-10-r140 - imagePullPolicy: "Always" - command: - - /bin/bash - args: - - -ec - - | - mkdir -p "/bitnami/rabbitmq/mnesia" - chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" - securityContext: - runAsUser: 0 - resources: - limits: { } - requests: { } - volumeMounts: - - name: data - mountPath: /bitnami/rabbitmq/mnesia - containers: - - name: rabbitmq - image: 10.250.0.110:8033/cmii/rabbitmq:3.9.12-debian-10-r3 - imagePullPolicy: "Always" - env: - - name: BITNAMI_DEBUG - value: "false" - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: K8S_SERVICE_NAME - value: "helm-rabbitmq-headless" - - name: K8S_ADDRESS_TYPE - value: hostname - - name: RABBITMQ_FORCE_BOOT - value: "no" - - name: RABBITMQ_NODE_NAME - value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: K8S_HOSTNAME_SUFFIX - value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: RABBITMQ_MNESIA_DIR - value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" - - name: RABBITMQ_LDAP_ENABLE - value: "no" - - name: RABBITMQ_LOGS - value: "-" - - name: RABBITMQ_ULIMIT_NOFILES - value: "65536" - - name: RABBITMQ_USE_LONGNAME - value: "true" - - name: RABBITMQ_ERL_COOKIE - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-erlang-cookie - - name: RABBITMQ_LOAD_DEFINITIONS - value: "no" - - name: RABBITMQ_SECURE_PASSWORD - value: "yes" - - name: RABBITMQ_USERNAME - value: "admin" - - name: RABBITMQ_PASSWORD - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-password - - name: RABBITMQ_PLUGINS - value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" - ports: - - name: amqp - containerPort: 5672 - - 
name: dist - containerPort: 25672 - - name: dashboard - containerPort: 15672 - - name: epmd - containerPort: 4369 - livenessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q ping - initialDelaySeconds: 120 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms - initialDelaySeconds: 10 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - lifecycle: - preStop: - exec: - command: - - /bin/bash - - -ec - - | - if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then - /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" - else - rabbitmqctl stop_app - fi - resources: - limits: { } - requests: { } - volumeMounts: - - name: configuration - mountPath: /bitnami/rabbitmq/conf - - name: data - mountPath: /bitnami/rabbitmq/mnesia - volumes: - - name: configuration - configMap: - name: helm-rabbitmq-config - items: - - key: rabbitmq.conf - path: rabbitmq.conf - - name: data - persistentVolumeClaim: - claimName: helm-rabbitmq ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -automountServiceAccountToken: true -secrets: - - name: helm-rabbitmq ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -type: Opaque -data: - rabbitmq-password: "blljUk45MXIuX2hq" - rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-rabbitmq-config - namespace: bjtg - labels: - 
app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -data: - rabbitmq.conf: |- - ## Username and password - ## - default_user = admin - default_pass = nYcRN91r._hj - ## Clustering - ## - cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s - cluster_formation.k8s.host = kubernetes.default.svc.cluster.local - cluster_formation.node_cleanup.interval = 10 - cluster_formation.node_cleanup.only_log_warning = true - cluster_partition_handling = autoheal - # queue master locator - queue_master_locator = min-masters - # enable guest user - loopback_users.guest = false - #default_vhost = default-vhost - #disk_free_limit.absolute = 50MB - #load_definitions = /app/load_definition.json ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -subjects: - - kind: ServiceAccount - name: helm-rabbitmq -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: helm-rabbitmq-endpoint-reader ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq-headless - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - clusterIP: None - ports: - - name: epmd - port: 4369 - 
targetPort: epmd - - name: amqp - port: 5672 - targetPort: amqp - - name: dist - port: 25672 - targetPort: dist - - name: dashboard - port: 15672 - targetPort: stats - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - publishNotReadyAddresses: true ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - type: NodePort - ports: - - name: amqp - port: 5672 - targetPort: amqp - nodePort: 35672 - - name: dashboard - port: 15672 - targetPort: dashboard - nodePort: 35675 - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - serviceName: helm-rabbitmq-headless - podManagementPolicy: OrderedReady - replicas: 1 - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - template: - metadata: - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq - annotations: - checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 - checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f - spec: - - serviceAccountName: helm-rabbitmq - affinity: { } - securityContext: - fsGroup: 5001 - runAsUser: 5001 - terminationGracePeriodSeconds: 120 - initContainers: - - name: volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:10-debian-10-r140 - imagePullPolicy: "Always" - command: - - /bin/bash - args: - - -ec - - | - mkdir -p 
"/bitnami/rabbitmq/mnesia" - chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" - securityContext: - runAsUser: 0 - resources: - limits: { } - requests: { } - volumeMounts: - - name: data - mountPath: /bitnami/rabbitmq/mnesia - containers: - - name: rabbitmq - image: 10.250.0.110:8033/cmii/rabbitmq:3.9.12-debian-10-r3 - imagePullPolicy: "Always" - env: - - name: BITNAMI_DEBUG - value: "false" - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: K8S_SERVICE_NAME - value: "helm-rabbitmq-headless" - - name: K8S_ADDRESS_TYPE - value: hostname - - name: RABBITMQ_FORCE_BOOT - value: "no" - - name: RABBITMQ_NODE_NAME - value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: K8S_HOSTNAME_SUFFIX - value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: RABBITMQ_MNESIA_DIR - value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" - - name: RABBITMQ_LDAP_ENABLE - value: "no" - - name: RABBITMQ_LOGS - value: "-" - - name: RABBITMQ_ULIMIT_NOFILES - value: "65536" - - name: RABBITMQ_USE_LONGNAME - value: "true" - - name: RABBITMQ_ERL_COOKIE - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-erlang-cookie - - name: RABBITMQ_LOAD_DEFINITIONS - value: "no" - - name: RABBITMQ_SECURE_PASSWORD - value: "yes" - - name: RABBITMQ_USERNAME - value: "admin" - - name: RABBITMQ_PASSWORD - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-password - - name: RABBITMQ_PLUGINS - value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" - ports: - - name: amqp - containerPort: 5672 - - name: dist - containerPort: 25672 - - name: dashboard - containerPort: 15672 - - name: epmd - containerPort: 4369 - livenessProbe: - exec: - command: - - /bin/bash - - 
-ec - - rabbitmq-diagnostics -q ping - initialDelaySeconds: 120 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms - initialDelaySeconds: 10 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - lifecycle: - preStop: - exec: - command: - - /bin/bash - - -ec - - | - if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then - /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" - else - rabbitmqctl stop_app - fi - resources: - limits: { } - requests: { } - volumeMounts: - - name: configuration - mountPath: /bitnami/rabbitmq/conf - - name: data - mountPath: /bitnami/rabbitmq/mnesia - volumes: - - name: configuration - configMap: - name: helm-rabbitmq-config - items: - - key: rabbitmq.conf - path: rabbitmq.conf - - name: data - persistentVolumeClaim: - claimName: helm-rabbitmq ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -automountServiceAccountToken: true -secrets: - - name: helm-rabbitmq ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -type: Opaque -data: - rabbitmq-password: "blljUk45MXIuX2hq" - rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-rabbitmq-config - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -data: - rabbitmq.conf: |- - 
## Username and password - ## - default_user = admin - default_pass = nYcRN91r._hj - ## Clustering - ## - cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s - cluster_formation.k8s.host = kubernetes.default.svc.cluster.local - cluster_formation.node_cleanup.interval = 10 - cluster_formation.node_cleanup.only_log_warning = true - cluster_partition_handling = autoheal - # queue master locator - queue_master_locator = min-masters - # enable guest user - loopback_users.guest = false - #default_vhost = default-vhost - #disk_free_limit.absolute = 50MB - #load_definitions = /app/load_definition.json ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -rules: - - apiGroups: [ "" ] - resources: [ "endpoints" ] - verbs: [ "get" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create" ] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helm-rabbitmq-endpoint-reader - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -subjects: - - kind: ServiceAccount - name: helm-rabbitmq -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: helm-rabbitmq-endpoint-reader ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq-headless - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - clusterIP: None - ports: - - name: epmd - port: 4369 - targetPort: epmd - - name: amqp - port: 5672 - targetPort: amqp - - name: dist - port: 25672 - targetPort: dist - - name: dashboard - port: 15672 - targetPort: stats - selector: - 
app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - publishNotReadyAddresses: true ---- -apiVersion: v1 -kind: Service -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - type: NodePort - ports: - - name: amqp - port: 5672 - targetPort: amqp - nodePort: 35672 - - name: dashboard - port: 15672 - targetPort: dashboard - nodePort: 35675 - selector: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-rabbitmq - namespace: bjtg - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq -spec: - serviceName: helm-rabbitmq-headless - podManagementPolicy: OrderedReady - replicas: 1 - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: helm-rabbitmq - app.kubernetes.io/release: bjtg - template: - metadata: - labels: - app.kubernetes.io/name: helm-rabbitmq - helm.sh/chart: rabbitmq-8.26.1 - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: rabbitmq - annotations: - checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 - checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f - spec: - - serviceAccountName: helm-rabbitmq - affinity: { } - securityContext: - fsGroup: 5001 - runAsUser: 5001 - terminationGracePeriodSeconds: 120 - initContainers: - - name: volume-permissions - image: 10.250.0.110:8033/cmii/bitnami-shell:10-debian-10-r140 - imagePullPolicy: "Always" - command: - - /bin/bash - args: - - -ec - - | - mkdir -p "/bitnami/rabbitmq/mnesia" - chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" - securityContext: - runAsUser: 0 - resources: - limits: { } - requests: { } - volumeMounts: - - name: data 
- mountPath: /bitnami/rabbitmq/mnesia - containers: - - name: rabbitmq - image: 10.250.0.110:8033/cmii/rabbitmq:3.9.12-debian-10-r3 - imagePullPolicy: "Always" - env: - - name: BITNAMI_DEBUG - value: "false" - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: K8S_SERVICE_NAME - value: "helm-rabbitmq-headless" - - name: K8S_ADDRESS_TYPE - value: hostname - - name: RABBITMQ_FORCE_BOOT - value: "no" - - name: RABBITMQ_NODE_NAME - value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: K8S_HOSTNAME_SUFFIX - value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" - - name: RABBITMQ_MNESIA_DIR - value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" - - name: RABBITMQ_LDAP_ENABLE - value: "no" - - name: RABBITMQ_LOGS - value: "-" - - name: RABBITMQ_ULIMIT_NOFILES - value: "65536" - - name: RABBITMQ_USE_LONGNAME - value: "true" - - name: RABBITMQ_ERL_COOKIE - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-erlang-cookie - - name: RABBITMQ_LOAD_DEFINITIONS - value: "no" - - name: RABBITMQ_SECURE_PASSWORD - value: "yes" - - name: RABBITMQ_USERNAME - value: "admin" - - name: RABBITMQ_PASSWORD - valueFrom: - secretKeyRef: - name: helm-rabbitmq - key: rabbitmq-password - - name: RABBITMQ_PLUGINS - value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" - ports: - - name: amqp - containerPort: 5672 - - name: dist - containerPort: 25672 - - name: dashboard - containerPort: 15672 - - name: epmd - containerPort: 4369 - livenessProbe: - exec: - command: - - /bin/bash - - -ec - - rabbitmq-diagnostics -q ping - initialDelaySeconds: 120 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 6 - readinessProbe: - exec: - command: 
- - /bin/bash - - -ec - - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms - initialDelaySeconds: 10 - periodSeconds: 30 - timeoutSeconds: 20 - successThreshold: 1 - failureThreshold: 3 - lifecycle: - preStop: - exec: - command: - - /bin/bash - - -ec - - | - if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then - /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" - else - rabbitmqctl stop_app - fi - resources: - limits: { } - requests: { } - volumeMounts: - - name: configuration - mountPath: /bitnami/rabbitmq/conf - - name: data - mountPath: /bitnami/rabbitmq/mnesia - volumes: - - name: configuration - configMap: - name: helm-rabbitmq-config - items: - - key: rabbitmq.conf - path: rabbitmq.conf - - name: data - persistentVolumeClaim: - claimName: helm-rabbitmq diff --git a/agent-operator/deploy/z_bjtg/k8s-redis.yaml b/agent-operator/deploy/z_bjtg/k8s-redis.yaml deleted file mode 100755 index 66ba7ba..0000000 --- a/agent-operator/deploy/z_bjtg/k8s-redis.yaml +++ /dev/null @@ -1,3492 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -automountServiceAccountToken: true -metadata: - name: helm-redis - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-redis - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -type: Opaque -data: - redis-password: "TWNhY2hlQDQ1MjI=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-configuration - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - redis.conf: |- - # User-supplied common configuration: - # Enable AOF 
https://redis.io/topics/persistence#append-only-file - appendonly yes - # Disable RDB persistence, AOF persistence already enabled. - save "" - # End of common configuration - master.conf: |- - dir /data - # User-supplied master configuration: - rename-command FLUSHDB "" - rename-command FLUSHALL "" - # End of master configuration - replica.conf: |- - dir /data - slave-read-only yes - # User-supplied replica configuration: - rename-command FLUSHDB "" - rename-command FLUSHALL "" - # End of replica configuration ---- -# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-health - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - ping_readiness_local.sh: |- - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h localhost \ - -p $REDIS_PORT \ - ping - ) - if [ "$response" != "PONG" ]; then - echo "$response" - exit 1 - fi - ping_liveness_local.sh: |- - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h localhost \ - -p $REDIS_PORT \ - ping - ) - if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then - echo "$response" - exit 1 - fi - ping_readiness_master.sh: |- - #!/bin/bash - - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h $REDIS_MASTER_HOST \ - -p 
$REDIS_MASTER_PORT_NUMBER \ - ping - ) - if [ "$response" != "PONG" ]; then - echo "$response" - exit 1 - fi - ping_liveness_master.sh: |- - #!/bin/bash - - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h $REDIS_MASTER_HOST \ - -p $REDIS_MASTER_PORT_NUMBER \ - ping - ) - if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then - echo "$response" - exit 1 - fi - ping_readiness_local_and_master.sh: |- - script_dir="$(dirname "$0")" - exit_status=0 - "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? - "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? - exit $exit_status - ping_liveness_local_and_master.sh: |- - script_dir="$(dirname "$0")" - exit_status=0 - "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? - "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? - exit $exit_status ---- -# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-scripts - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - start-master.sh: | - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then - cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf - fi - if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then - cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf - fi - ARGS=("--port" "${REDIS_PORT}") - ARGS+=("--requirepass" "${REDIS_PASSWORD}") - ARGS+=("--masterauth" "${REDIS_PASSWORD}") - ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") - ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") - exec redis-server "${ARGS[@]}" - start-replica.sh: | - #!/bin/bash - - get_port() { - hostname="$1" - type="$2" - - port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") - port=${!port_var} - - if [ -z "$port" ]; then - case $type in - "SENTINEL") - echo 26379 - ;; - "REDIS") - echo 6379 - ;; - esac - else - echo $port - fi - } - - get_full_hostname() { - hostname="$1" - echo "${hostname}.${HEADLESS_SERVICE}" - } - - REDISPORT=$(get_port "$HOSTNAME" "REDIS") - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then - cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf - fi - if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then - cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf - fi - - echo "" >> /opt/bitnami/redis/etc/replica.conf - echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf - echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf - ARGS=("--port" "${REDIS_PORT}") - ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") - ARGS+=("--requirepass" "${REDIS_PASSWORD}") - ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") - ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") - ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") - exec redis-server "${ARGS[@]}" ---- -# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml -apiVersion: v1 -kind: Service -metadata: - name: helm-redis-headless - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -spec: - type: ClusterIP - clusterIP: None - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg ---- -# Source: outside-deploy/charts/redis-db/templates/master/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: helm-redis-master - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master -spec: - type: ClusterIP - - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - nodePort: null - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master ---- -# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml -apiVersion: v1 -kind: Service 
-metadata: - name: helm-redis-replicas - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica -spec: - type: ClusterIP - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - nodePort: null - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - app.kubernetes.io/component: replica ---- -# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-redis-master - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master - serviceName: helm-redis-headless - updateStrategy: - rollingUpdate: { } - type: RollingUpdate - template: - metadata: - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master - annotations: - checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 - checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 - checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 - checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d - spec: - affinity: { } - securityContext: - fsGroup: 1001 - serviceAccountName: helm-redis - - terminationGracePeriodSeconds: 30 - containers: - - name: redis - image: 10.250.0.110:8033/cmii/redis:6.2.6-debian-10-r0 - 
imagePullPolicy: "Always" - securityContext: - runAsUser: 1001 - command: - - /bin/bash - args: - - -c - - /opt/bitnami/scripts/start-scripts/start-master.sh - env: - - name: BITNAMI_DEBUG - value: "false" - - name: REDIS_REPLICATION_MODE - value: master - - name: ALLOW_EMPTY_PASSWORD - value: "no" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_TLS_ENABLED - value: "no" - - name: REDIS_PORT - value: "6379" - ports: - - name: redis - containerPort: 6379 - livenessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - # One second longer than command timeout should prevent generation of zombie processes. - timeoutSeconds: 6 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_liveness_local.sh 5 - readinessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 2 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_readiness_local.sh 1 - resources: - limits: - cpu: "2" - memory: 8Gi - requests: - cpu: "100m" - memory: 1Gi - volumeMounts: - - name: start-scripts - mountPath: /opt/bitnami/scripts/start-scripts - - name: health - mountPath: /health - - name: redis-data - mountPath: /data - subPath: - - name: config - mountPath: /opt/bitnami/redis/mounted-etc - - name: redis-tmp-conf - mountPath: /opt/bitnami/redis/etc/ - - name: tmp - mountPath: /tmp - volumes: - - name: start-scripts - configMap: - name: helm-redis-scripts - defaultMode: 0755 - - name: health - configMap: - name: helm-redis-health - defaultMode: 0755 - - name: config - configMap: - name: helm-redis-configuration - - name: redis-tmp-conf - emptyDir: { } - - name: tmp - emptyDir: { } - - name: redis-data - emptyDir: { } ---- -# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-redis-replicas - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - 
octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - app.kubernetes.io/component: replica - serviceName: helm-redis-headless - updateStrategy: - rollingUpdate: { } - type: RollingUpdate - template: - metadata: - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica - annotations: - checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 - checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 - checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 - checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d - spec: - securityContext: - fsGroup: 1001 - serviceAccountName: helm-redis - terminationGracePeriodSeconds: 30 - containers: - - name: redis - image: 10.250.0.110:8033/cmii/redis:6.2.6-debian-10-r0 - imagePullPolicy: "Always" - securityContext: - runAsUser: 1001 - command: - - /bin/bash - args: - - -c - - /opt/bitnami/scripts/start-scripts/start-replica.sh - env: - - name: BITNAMI_DEBUG - value: "false" - - name: REDIS_REPLICATION_MODE - value: slave - - name: REDIS_MASTER_HOST - value: helm-redis-master-0.helm-redis-headless.bjtg.svc.cluster.local - - name: REDIS_MASTER_PORT_NUMBER - value: "6379" - - name: ALLOW_EMPTY_PASSWORD - value: "no" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_MASTER_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_TLS_ENABLED - value: "no" - - name: REDIS_PORT - value: "6379" - ports: - - name: redis - containerPort: 6379 - livenessProbe: - initialDelaySeconds: 20 - periodSeconds: 
5 - timeoutSeconds: 6 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_liveness_local_and_master.sh 5 - readinessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 2 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_readiness_local_and_master.sh 1 - resources: - limits: - cpu: "2" - memory: 8Gi - requests: - cpu: "100m" - memory: 1Gi - volumeMounts: - - name: start-scripts - mountPath: /opt/bitnami/scripts/start-scripts - - name: health - mountPath: /health - - name: redis-data - mountPath: /data - subPath: - - name: config - mountPath: /opt/bitnami/redis/mounted-etc - - name: redis-tmp-conf - mountPath: /opt/bitnami/redis/etc - volumes: - - name: start-scripts - configMap: - name: helm-redis-scripts - defaultMode: 0755 - - name: health - configMap: - name: helm-redis-health - defaultMode: 0755 - - name: config - configMap: - name: helm-redis-configuration - - name: redis-tmp-conf - emptyDir: { } - - name: redis-data - emptyDir: { } - ---- -apiVersion: v1 -kind: ServiceAccount -automountServiceAccountToken: true -metadata: - name: helm-redis - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-redis - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -type: Opaque -data: - redis-password: "TWNhY2hlQDQ1MjI=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-configuration - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - redis.conf: |- - # User-supplied common configuration: - # Enable AOF 
https://redis.io/topics/persistence#append-only-file - appendonly yes - # Disable RDB persistence, AOF persistence already enabled. - save "" - # End of common configuration - master.conf: |- - dir /data - # User-supplied master configuration: - rename-command FLUSHDB "" - rename-command FLUSHALL "" - # End of master configuration - replica.conf: |- - dir /data - slave-read-only yes - # User-supplied replica configuration: - rename-command FLUSHDB "" - rename-command FLUSHALL "" - # End of replica configuration ---- -# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-health - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - ping_readiness_local.sh: |- - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h localhost \ - -p $REDIS_PORT \ - ping - ) - if [ "$response" != "PONG" ]; then - echo "$response" - exit 1 - fi - ping_liveness_local.sh: |- - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h localhost \ - -p $REDIS_PORT \ - ping - ) - if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then - echo "$response" - exit 1 - fi - ping_readiness_master.sh: |- - #!/bin/bash - - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h $REDIS_MASTER_HOST \ - -p 
$REDIS_MASTER_PORT_NUMBER \ - ping - ) - if [ "$response" != "PONG" ]; then - echo "$response" - exit 1 - fi - ping_liveness_master.sh: |- - #!/bin/bash - - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h $REDIS_MASTER_HOST \ - -p $REDIS_MASTER_PORT_NUMBER \ - ping - ) - if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then - echo "$response" - exit 1 - fi - ping_readiness_local_and_master.sh: |- - script_dir="$(dirname "$0")" - exit_status=0 - "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? - "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? - exit $exit_status - ping_liveness_local_and_master.sh: |- - script_dir="$(dirname "$0")" - exit_status=0 - "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? - "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? - exit $exit_status ---- -# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-scripts - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - start-master.sh: | - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then - cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf - fi - if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then - cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf - fi - ARGS=("--port" "${REDIS_PORT}") - ARGS+=("--requirepass" "${REDIS_PASSWORD}") - ARGS+=("--masterauth" "${REDIS_PASSWORD}") - ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") - ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") - exec redis-server "${ARGS[@]}" - start-replica.sh: | - #!/bin/bash - - get_port() { - hostname="$1" - type="$2" - - port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") - port=${!port_var} - - if [ -z "$port" ]; then - case $type in - "SENTINEL") - echo 26379 - ;; - "REDIS") - echo 6379 - ;; - esac - else - echo $port - fi - } - - get_full_hostname() { - hostname="$1" - echo "${hostname}.${HEADLESS_SERVICE}" - } - - REDISPORT=$(get_port "$HOSTNAME" "REDIS") - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then - cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf - fi - if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then - cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf - fi - - echo "" >> /opt/bitnami/redis/etc/replica.conf - echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf - echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf - ARGS=("--port" "${REDIS_PORT}") - ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") - ARGS+=("--requirepass" "${REDIS_PASSWORD}") - ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") - ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") - ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") - exec redis-server "${ARGS[@]}" ---- -# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml -apiVersion: v1 -kind: Service -metadata: - name: helm-redis-headless - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -spec: - type: ClusterIP - clusterIP: None - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg ---- -# Source: outside-deploy/charts/redis-db/templates/master/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: helm-redis-master - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master -spec: - type: ClusterIP - - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - nodePort: null - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master ---- -# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml -apiVersion: v1 -kind: Service 
-metadata: - name: helm-redis-replicas - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica -spec: - type: ClusterIP - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - nodePort: null - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - app.kubernetes.io/component: replica ---- -# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-redis-master - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master - serviceName: helm-redis-headless - updateStrategy: - rollingUpdate: { } - type: RollingUpdate - template: - metadata: - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master - annotations: - checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 - checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 - checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 - checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d - spec: - affinity: { } - securityContext: - fsGroup: 1001 - serviceAccountName: helm-redis - - terminationGracePeriodSeconds: 30 - containers: - - name: redis - image: 10.250.0.110:8033/cmii/redis:6.2.6-debian-10-r0 - 
imagePullPolicy: "Always" - securityContext: - runAsUser: 1001 - command: - - /bin/bash - args: - - -c - - /opt/bitnami/scripts/start-scripts/start-master.sh - env: - - name: BITNAMI_DEBUG - value: "false" - - name: REDIS_REPLICATION_MODE - value: master - - name: ALLOW_EMPTY_PASSWORD - value: "no" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_TLS_ENABLED - value: "no" - - name: REDIS_PORT - value: "6379" - ports: - - name: redis - containerPort: 6379 - livenessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - # One second longer than command timeout should prevent generation of zombie processes. - timeoutSeconds: 6 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_liveness_local.sh 5 - readinessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 2 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_readiness_local.sh 1 - resources: - limits: - cpu: "2" - memory: 8Gi - requests: - cpu: "100m" - memory: 1Gi - volumeMounts: - - name: start-scripts - mountPath: /opt/bitnami/scripts/start-scripts - - name: health - mountPath: /health - - name: redis-data - mountPath: /data - subPath: - - name: config - mountPath: /opt/bitnami/redis/mounted-etc - - name: redis-tmp-conf - mountPath: /opt/bitnami/redis/etc/ - - name: tmp - mountPath: /tmp - volumes: - - name: start-scripts - configMap: - name: helm-redis-scripts - defaultMode: 0755 - - name: health - configMap: - name: helm-redis-health - defaultMode: 0755 - - name: config - configMap: - name: helm-redis-configuration - - name: redis-tmp-conf - emptyDir: { } - - name: tmp - emptyDir: { } - - name: redis-data - emptyDir: { } ---- -# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-redis-replicas - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - 
octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - app.kubernetes.io/component: replica - serviceName: helm-redis-headless - updateStrategy: - rollingUpdate: { } - type: RollingUpdate - template: - metadata: - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica - annotations: - checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 - checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 - checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 - checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d - spec: - securityContext: - fsGroup: 1001 - serviceAccountName: helm-redis - terminationGracePeriodSeconds: 30 - containers: - - name: redis - image: 10.250.0.110:8033/cmii/redis:6.2.6-debian-10-r0 - imagePullPolicy: "Always" - securityContext: - runAsUser: 1001 - command: - - /bin/bash - args: - - -c - - /opt/bitnami/scripts/start-scripts/start-replica.sh - env: - - name: BITNAMI_DEBUG - value: "false" - - name: REDIS_REPLICATION_MODE - value: slave - - name: REDIS_MASTER_HOST - value: helm-redis-master-0.helm-redis-headless.bjtg.svc.cluster.local - - name: REDIS_MASTER_PORT_NUMBER - value: "6379" - - name: ALLOW_EMPTY_PASSWORD - value: "no" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_MASTER_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_TLS_ENABLED - value: "no" - - name: REDIS_PORT - value: "6379" - ports: - - name: redis - containerPort: 6379 - livenessProbe: - initialDelaySeconds: 20 - periodSeconds: 
5 - timeoutSeconds: 6 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_liveness_local_and_master.sh 5 - readinessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 2 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_readiness_local_and_master.sh 1 - resources: - limits: - cpu: "2" - memory: 8Gi - requests: - cpu: "100m" - memory: 1Gi - volumeMounts: - - name: start-scripts - mountPath: /opt/bitnami/scripts/start-scripts - - name: health - mountPath: /health - - name: redis-data - mountPath: /data - subPath: - - name: config - mountPath: /opt/bitnami/redis/mounted-etc - - name: redis-tmp-conf - mountPath: /opt/bitnami/redis/etc - volumes: - - name: start-scripts - configMap: - name: helm-redis-scripts - defaultMode: 0755 - - name: health - configMap: - name: helm-redis-health - defaultMode: 0755 - - name: config - configMap: - name: helm-redis-configuration - - name: redis-tmp-conf - emptyDir: { } - - name: redis-data - emptyDir: { } - ---- -apiVersion: v1 -kind: ServiceAccount -automountServiceAccountToken: true -metadata: - name: helm-redis - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-redis - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -type: Opaque -data: - redis-password: "TWNhY2hlQDQ1MjI=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-configuration - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - redis.conf: |- - # User-supplied common configuration: - # Enable AOF 
https://redis.io/topics/persistence#append-only-file - appendonly yes - # Disable RDB persistence, AOF persistence already enabled. - save "" - # End of common configuration - master.conf: |- - dir /data - # User-supplied master configuration: - rename-command FLUSHDB "" - rename-command FLUSHALL "" - # End of master configuration - replica.conf: |- - dir /data - slave-read-only yes - # User-supplied replica configuration: - rename-command FLUSHDB "" - rename-command FLUSHALL "" - # End of replica configuration ---- -# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-health - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - ping_readiness_local.sh: |- - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h localhost \ - -p $REDIS_PORT \ - ping - ) - if [ "$response" != "PONG" ]; then - echo "$response" - exit 1 - fi - ping_liveness_local.sh: |- - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h localhost \ - -p $REDIS_PORT \ - ping - ) - if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then - echo "$response" - exit 1 - fi - ping_readiness_master.sh: |- - #!/bin/bash - - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h $REDIS_MASTER_HOST \ - -p 
$REDIS_MASTER_PORT_NUMBER \ - ping - ) - if [ "$response" != "PONG" ]; then - echo "$response" - exit 1 - fi - ping_liveness_master.sh: |- - #!/bin/bash - - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h $REDIS_MASTER_HOST \ - -p $REDIS_MASTER_PORT_NUMBER \ - ping - ) - if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then - echo "$response" - exit 1 - fi - ping_readiness_local_and_master.sh: |- - script_dir="$(dirname "$0")" - exit_status=0 - "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? - "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? - exit $exit_status - ping_liveness_local_and_master.sh: |- - script_dir="$(dirname "$0")" - exit_status=0 - "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? - "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? - exit $exit_status ---- -# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-scripts - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - start-master.sh: | - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then - cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf - fi - if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then - cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf - fi - ARGS=("--port" "${REDIS_PORT}") - ARGS+=("--requirepass" "${REDIS_PASSWORD}") - ARGS+=("--masterauth" "${REDIS_PASSWORD}") - ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") - ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") - exec redis-server "${ARGS[@]}" - start-replica.sh: | - #!/bin/bash - - get_port() { - hostname="$1" - type="$2" - - port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") - port=${!port_var} - - if [ -z "$port" ]; then - case $type in - "SENTINEL") - echo 26379 - ;; - "REDIS") - echo 6379 - ;; - esac - else - echo $port - fi - } - - get_full_hostname() { - hostname="$1" - echo "${hostname}.${HEADLESS_SERVICE}" - } - - REDISPORT=$(get_port "$HOSTNAME" "REDIS") - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then - cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf - fi - if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then - cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf - fi - - echo "" >> /opt/bitnami/redis/etc/replica.conf - echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf - echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf - ARGS=("--port" "${REDIS_PORT}") - ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") - ARGS+=("--requirepass" "${REDIS_PASSWORD}") - ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") - ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") - ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") - exec redis-server "${ARGS[@]}" ---- -# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml -apiVersion: v1 -kind: Service -metadata: - name: helm-redis-headless - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -spec: - type: ClusterIP - clusterIP: None - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg ---- -# Source: outside-deploy/charts/redis-db/templates/master/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: helm-redis-master - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master -spec: - type: ClusterIP - - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - nodePort: null - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master ---- -# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml -apiVersion: v1 -kind: Service 
-metadata: - name: helm-redis-replicas - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica -spec: - type: ClusterIP - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - nodePort: null - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - app.kubernetes.io/component: replica ---- -# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-redis-master - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master - serviceName: helm-redis-headless - updateStrategy: - rollingUpdate: { } - type: RollingUpdate - template: - metadata: - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master - annotations: - checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 - checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 - checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 - checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d - spec: - affinity: { } - securityContext: - fsGroup: 1001 - serviceAccountName: helm-redis - - terminationGracePeriodSeconds: 30 - containers: - - name: redis - image: 10.250.0.110:8033/cmii/redis:6.2.6-debian-10-r0 - 
imagePullPolicy: "Always" - securityContext: - runAsUser: 1001 - command: - - /bin/bash - args: - - -c - - /opt/bitnami/scripts/start-scripts/start-master.sh - env: - - name: BITNAMI_DEBUG - value: "false" - - name: REDIS_REPLICATION_MODE - value: master - - name: ALLOW_EMPTY_PASSWORD - value: "no" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_TLS_ENABLED - value: "no" - - name: REDIS_PORT - value: "6379" - ports: - - name: redis - containerPort: 6379 - livenessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - # One second longer than command timeout should prevent generation of zombie processes. - timeoutSeconds: 6 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_liveness_local.sh 5 - readinessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 2 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_readiness_local.sh 1 - resources: - limits: - cpu: "2" - memory: 8Gi - requests: - cpu: "100m" - memory: 1Gi - volumeMounts: - - name: start-scripts - mountPath: /opt/bitnami/scripts/start-scripts - - name: health - mountPath: /health - - name: redis-data - mountPath: /data - subPath: - - name: config - mountPath: /opt/bitnami/redis/mounted-etc - - name: redis-tmp-conf - mountPath: /opt/bitnami/redis/etc/ - - name: tmp - mountPath: /tmp - volumes: - - name: start-scripts - configMap: - name: helm-redis-scripts - defaultMode: 0755 - - name: health - configMap: - name: helm-redis-health - defaultMode: 0755 - - name: config - configMap: - name: helm-redis-configuration - - name: redis-tmp-conf - emptyDir: { } - - name: tmp - emptyDir: { } - - name: redis-data - emptyDir: { } ---- -# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-redis-replicas - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - 
octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - app.kubernetes.io/component: replica - serviceName: helm-redis-headless - updateStrategy: - rollingUpdate: { } - type: RollingUpdate - template: - metadata: - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica - annotations: - checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 - checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 - checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 - checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d - spec: - securityContext: - fsGroup: 1001 - serviceAccountName: helm-redis - terminationGracePeriodSeconds: 30 - containers: - - name: redis - image: 10.250.0.110:8033/cmii/redis:6.2.6-debian-10-r0 - imagePullPolicy: "Always" - securityContext: - runAsUser: 1001 - command: - - /bin/bash - args: - - -c - - /opt/bitnami/scripts/start-scripts/start-replica.sh - env: - - name: BITNAMI_DEBUG - value: "false" - - name: REDIS_REPLICATION_MODE - value: slave - - name: REDIS_MASTER_HOST - value: helm-redis-master-0.helm-redis-headless.bjtg.svc.cluster.local - - name: REDIS_MASTER_PORT_NUMBER - value: "6379" - - name: ALLOW_EMPTY_PASSWORD - value: "no" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_MASTER_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_TLS_ENABLED - value: "no" - - name: REDIS_PORT - value: "6379" - ports: - - name: redis - containerPort: 6379 - livenessProbe: - initialDelaySeconds: 20 - periodSeconds: 
5 - timeoutSeconds: 6 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_liveness_local_and_master.sh 5 - readinessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 2 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_readiness_local_and_master.sh 1 - resources: - limits: - cpu: "2" - memory: 8Gi - requests: - cpu: "100m" - memory: 1Gi - volumeMounts: - - name: start-scripts - mountPath: /opt/bitnami/scripts/start-scripts - - name: health - mountPath: /health - - name: redis-data - mountPath: /data - subPath: - - name: config - mountPath: /opt/bitnami/redis/mounted-etc - - name: redis-tmp-conf - mountPath: /opt/bitnami/redis/etc - volumes: - - name: start-scripts - configMap: - name: helm-redis-scripts - defaultMode: 0755 - - name: health - configMap: - name: helm-redis-health - defaultMode: 0755 - - name: config - configMap: - name: helm-redis-configuration - - name: redis-tmp-conf - emptyDir: { } - - name: redis-data - emptyDir: { } - ---- -apiVersion: v1 -kind: ServiceAccount -automountServiceAccountToken: true -metadata: - name: helm-redis - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-redis - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -type: Opaque -data: - redis-password: "TWNhY2hlQDQ1MjI=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-configuration - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - redis.conf: |- - # User-supplied common configuration: - # Enable AOF 
https://redis.io/topics/persistence#append-only-file - appendonly yes - # Disable RDB persistence, AOF persistence already enabled. - save "" - # End of common configuration - master.conf: |- - dir /data - # User-supplied master configuration: - rename-command FLUSHDB "" - rename-command FLUSHALL "" - # End of master configuration - replica.conf: |- - dir /data - slave-read-only yes - # User-supplied replica configuration: - rename-command FLUSHDB "" - rename-command FLUSHALL "" - # End of replica configuration ---- -# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-health - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - ping_readiness_local.sh: |- - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h localhost \ - -p $REDIS_PORT \ - ping - ) - if [ "$response" != "PONG" ]; then - echo "$response" - exit 1 - fi - ping_liveness_local.sh: |- - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h localhost \ - -p $REDIS_PORT \ - ping - ) - if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then - echo "$response" - exit 1 - fi - ping_readiness_master.sh: |- - #!/bin/bash - - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h $REDIS_MASTER_HOST \ - -p 
$REDIS_MASTER_PORT_NUMBER \ - ping - ) - if [ "$response" != "PONG" ]; then - echo "$response" - exit 1 - fi - ping_liveness_master.sh: |- - #!/bin/bash - - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h $REDIS_MASTER_HOST \ - -p $REDIS_MASTER_PORT_NUMBER \ - ping - ) - if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then - echo "$response" - exit 1 - fi - ping_readiness_local_and_master.sh: |- - script_dir="$(dirname "$0")" - exit_status=0 - "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? - "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? - exit $exit_status - ping_liveness_local_and_master.sh: |- - script_dir="$(dirname "$0")" - exit_status=0 - "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? - "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? - exit $exit_status ---- -# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-scripts - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - start-master.sh: | - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then - cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf - fi - if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then - cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf - fi - ARGS=("--port" "${REDIS_PORT}") - ARGS+=("--requirepass" "${REDIS_PASSWORD}") - ARGS+=("--masterauth" "${REDIS_PASSWORD}") - ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") - ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") - exec redis-server "${ARGS[@]}" - start-replica.sh: | - #!/bin/bash - - get_port() { - hostname="$1" - type="$2" - - port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") - port=${!port_var} - - if [ -z "$port" ]; then - case $type in - "SENTINEL") - echo 26379 - ;; - "REDIS") - echo 6379 - ;; - esac - else - echo $port - fi - } - - get_full_hostname() { - hostname="$1" - echo "${hostname}.${HEADLESS_SERVICE}" - } - - REDISPORT=$(get_port "$HOSTNAME" "REDIS") - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then - cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf - fi - if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then - cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf - fi - - echo "" >> /opt/bitnami/redis/etc/replica.conf - echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf - echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf - ARGS=("--port" "${REDIS_PORT}") - ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") - ARGS+=("--requirepass" "${REDIS_PASSWORD}") - ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") - ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") - ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") - exec redis-server "${ARGS[@]}" ---- -# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml -apiVersion: v1 -kind: Service -metadata: - name: helm-redis-headless - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -spec: - type: ClusterIP - clusterIP: None - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg ---- -# Source: outside-deploy/charts/redis-db/templates/master/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: helm-redis-master - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master -spec: - type: ClusterIP - - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - nodePort: null - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master ---- -# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml -apiVersion: v1 -kind: Service 
-metadata: - name: helm-redis-replicas - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica -spec: - type: ClusterIP - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - nodePort: null - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - app.kubernetes.io/component: replica ---- -# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-redis-master - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master - serviceName: helm-redis-headless - updateStrategy: - rollingUpdate: { } - type: RollingUpdate - template: - metadata: - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master - annotations: - checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 - checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 - checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 - checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d - spec: - affinity: { } - securityContext: - fsGroup: 1001 - serviceAccountName: helm-redis - - terminationGracePeriodSeconds: 30 - containers: - - name: redis - image: 10.250.0.110:8033/cmii/redis:6.2.6-debian-10-r0 - 
imagePullPolicy: "Always" - securityContext: - runAsUser: 1001 - command: - - /bin/bash - args: - - -c - - /opt/bitnami/scripts/start-scripts/start-master.sh - env: - - name: BITNAMI_DEBUG - value: "false" - - name: REDIS_REPLICATION_MODE - value: master - - name: ALLOW_EMPTY_PASSWORD - value: "no" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_TLS_ENABLED - value: "no" - - name: REDIS_PORT - value: "6379" - ports: - - name: redis - containerPort: 6379 - livenessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - # One second longer than command timeout should prevent generation of zombie processes. - timeoutSeconds: 6 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_liveness_local.sh 5 - readinessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 2 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_readiness_local.sh 1 - resources: - limits: - cpu: "2" - memory: 8Gi - requests: - cpu: "100m" - memory: 1Gi - volumeMounts: - - name: start-scripts - mountPath: /opt/bitnami/scripts/start-scripts - - name: health - mountPath: /health - - name: redis-data - mountPath: /data - subPath: - - name: config - mountPath: /opt/bitnami/redis/mounted-etc - - name: redis-tmp-conf - mountPath: /opt/bitnami/redis/etc/ - - name: tmp - mountPath: /tmp - volumes: - - name: start-scripts - configMap: - name: helm-redis-scripts - defaultMode: 0755 - - name: health - configMap: - name: helm-redis-health - defaultMode: 0755 - - name: config - configMap: - name: helm-redis-configuration - - name: redis-tmp-conf - emptyDir: { } - - name: tmp - emptyDir: { } - - name: redis-data - emptyDir: { } ---- -# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-redis-replicas - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - 
octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - app.kubernetes.io/component: replica - serviceName: helm-redis-headless - updateStrategy: - rollingUpdate: { } - type: RollingUpdate - template: - metadata: - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica - annotations: - checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 - checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 - checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 - checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d - spec: - securityContext: - fsGroup: 1001 - serviceAccountName: helm-redis - terminationGracePeriodSeconds: 30 - containers: - - name: redis - image: 10.250.0.110:8033/cmii/redis:6.2.6-debian-10-r0 - imagePullPolicy: "Always" - securityContext: - runAsUser: 1001 - command: - - /bin/bash - args: - - -c - - /opt/bitnami/scripts/start-scripts/start-replica.sh - env: - - name: BITNAMI_DEBUG - value: "false" - - name: REDIS_REPLICATION_MODE - value: slave - - name: REDIS_MASTER_HOST - value: helm-redis-master-0.helm-redis-headless.bjtg.svc.cluster.local - - name: REDIS_MASTER_PORT_NUMBER - value: "6379" - - name: ALLOW_EMPTY_PASSWORD - value: "no" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_MASTER_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_TLS_ENABLED - value: "no" - - name: REDIS_PORT - value: "6379" - ports: - - name: redis - containerPort: 6379 - livenessProbe: - initialDelaySeconds: 20 - periodSeconds: 
5 - timeoutSeconds: 6 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_liveness_local_and_master.sh 5 - readinessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 2 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_readiness_local_and_master.sh 1 - resources: - limits: - cpu: "2" - memory: 8Gi - requests: - cpu: "100m" - memory: 1Gi - volumeMounts: - - name: start-scripts - mountPath: /opt/bitnami/scripts/start-scripts - - name: health - mountPath: /health - - name: redis-data - mountPath: /data - subPath: - - name: config - mountPath: /opt/bitnami/redis/mounted-etc - - name: redis-tmp-conf - mountPath: /opt/bitnami/redis/etc - volumes: - - name: start-scripts - configMap: - name: helm-redis-scripts - defaultMode: 0755 - - name: health - configMap: - name: helm-redis-health - defaultMode: 0755 - - name: config - configMap: - name: helm-redis-configuration - - name: redis-tmp-conf - emptyDir: { } - - name: redis-data - emptyDir: { } - ---- -apiVersion: v1 -kind: ServiceAccount -automountServiceAccountToken: true -metadata: - name: helm-redis - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-redis - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -type: Opaque -data: - redis-password: "TWNhY2hlQDQ1MjI=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-configuration - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - redis.conf: |- - # User-supplied common configuration: - # Enable AOF 
https://redis.io/topics/persistence#append-only-file - appendonly yes - # Disable RDB persistence, AOF persistence already enabled. - save "" - # End of common configuration - master.conf: |- - dir /data - # User-supplied master configuration: - rename-command FLUSHDB "" - rename-command FLUSHALL "" - # End of master configuration - replica.conf: |- - dir /data - slave-read-only yes - # User-supplied replica configuration: - rename-command FLUSHDB "" - rename-command FLUSHALL "" - # End of replica configuration ---- -# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-health - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - ping_readiness_local.sh: |- - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h localhost \ - -p $REDIS_PORT \ - ping - ) - if [ "$response" != "PONG" ]; then - echo "$response" - exit 1 - fi - ping_liveness_local.sh: |- - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h localhost \ - -p $REDIS_PORT \ - ping - ) - if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then - echo "$response" - exit 1 - fi - ping_readiness_master.sh: |- - #!/bin/bash - - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h $REDIS_MASTER_HOST \ - -p 
$REDIS_MASTER_PORT_NUMBER \ - ping - ) - if [ "$response" != "PONG" ]; then - echo "$response" - exit 1 - fi - ping_liveness_master.sh: |- - #!/bin/bash - - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h $REDIS_MASTER_HOST \ - -p $REDIS_MASTER_PORT_NUMBER \ - ping - ) - if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then - echo "$response" - exit 1 - fi - ping_readiness_local_and_master.sh: |- - script_dir="$(dirname "$0")" - exit_status=0 - "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? - "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? - exit $exit_status - ping_liveness_local_and_master.sh: |- - script_dir="$(dirname "$0")" - exit_status=0 - "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? - "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? - exit $exit_status ---- -# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-scripts - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - start-master.sh: | - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then - cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf - fi - if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then - cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf - fi - ARGS=("--port" "${REDIS_PORT}") - ARGS+=("--requirepass" "${REDIS_PASSWORD}") - ARGS+=("--masterauth" "${REDIS_PASSWORD}") - ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") - ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") - exec redis-server "${ARGS[@]}" - start-replica.sh: | - #!/bin/bash - - get_port() { - hostname="$1" - type="$2" - - port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") - port=${!port_var} - - if [ -z "$port" ]; then - case $type in - "SENTINEL") - echo 26379 - ;; - "REDIS") - echo 6379 - ;; - esac - else - echo $port - fi - } - - get_full_hostname() { - hostname="$1" - echo "${hostname}.${HEADLESS_SERVICE}" - } - - REDISPORT=$(get_port "$HOSTNAME" "REDIS") - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then - cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf - fi - if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then - cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf - fi - - echo "" >> /opt/bitnami/redis/etc/replica.conf - echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf - echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf - ARGS=("--port" "${REDIS_PORT}") - ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") - ARGS+=("--requirepass" "${REDIS_PASSWORD}") - ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") - ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") - ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") - exec redis-server "${ARGS[@]}" ---- -# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml -apiVersion: v1 -kind: Service -metadata: - name: helm-redis-headless - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -spec: - type: ClusterIP - clusterIP: None - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg ---- -# Source: outside-deploy/charts/redis-db/templates/master/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: helm-redis-master - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master -spec: - type: ClusterIP - - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - nodePort: null - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master ---- -# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml -apiVersion: v1 -kind: Service 
-metadata: - name: helm-redis-replicas - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica -spec: - type: ClusterIP - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - nodePort: null - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - app.kubernetes.io/component: replica ---- -# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-redis-master - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master - serviceName: helm-redis-headless - updateStrategy: - rollingUpdate: { } - type: RollingUpdate - template: - metadata: - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master - annotations: - checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 - checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 - checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 - checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d - spec: - affinity: { } - securityContext: - fsGroup: 1001 - serviceAccountName: helm-redis - - terminationGracePeriodSeconds: 30 - containers: - - name: redis - image: 10.250.0.110:8033/cmii/redis:6.2.6-debian-10-r0 - 
imagePullPolicy: "Always" - securityContext: - runAsUser: 1001 - command: - - /bin/bash - args: - - -c - - /opt/bitnami/scripts/start-scripts/start-master.sh - env: - - name: BITNAMI_DEBUG - value: "false" - - name: REDIS_REPLICATION_MODE - value: master - - name: ALLOW_EMPTY_PASSWORD - value: "no" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_TLS_ENABLED - value: "no" - - name: REDIS_PORT - value: "6379" - ports: - - name: redis - containerPort: 6379 - livenessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - # One second longer than command timeout should prevent generation of zombie processes. - timeoutSeconds: 6 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_liveness_local.sh 5 - readinessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 2 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_readiness_local.sh 1 - resources: - limits: - cpu: "2" - memory: 8Gi - requests: - cpu: "100m" - memory: 1Gi - volumeMounts: - - name: start-scripts - mountPath: /opt/bitnami/scripts/start-scripts - - name: health - mountPath: /health - - name: redis-data - mountPath: /data - subPath: - - name: config - mountPath: /opt/bitnami/redis/mounted-etc - - name: redis-tmp-conf - mountPath: /opt/bitnami/redis/etc/ - - name: tmp - mountPath: /tmp - volumes: - - name: start-scripts - configMap: - name: helm-redis-scripts - defaultMode: 0755 - - name: health - configMap: - name: helm-redis-health - defaultMode: 0755 - - name: config - configMap: - name: helm-redis-configuration - - name: redis-tmp-conf - emptyDir: { } - - name: tmp - emptyDir: { } - - name: redis-data - emptyDir: { } ---- -# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-redis-replicas - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - 
octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - app.kubernetes.io/component: replica - serviceName: helm-redis-headless - updateStrategy: - rollingUpdate: { } - type: RollingUpdate - template: - metadata: - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica - annotations: - checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 - checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 - checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 - checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d - spec: - securityContext: - fsGroup: 1001 - serviceAccountName: helm-redis - terminationGracePeriodSeconds: 30 - containers: - - name: redis - image: 10.250.0.110:8033/cmii/redis:6.2.6-debian-10-r0 - imagePullPolicy: "Always" - securityContext: - runAsUser: 1001 - command: - - /bin/bash - args: - - -c - - /opt/bitnami/scripts/start-scripts/start-replica.sh - env: - - name: BITNAMI_DEBUG - value: "false" - - name: REDIS_REPLICATION_MODE - value: slave - - name: REDIS_MASTER_HOST - value: helm-redis-master-0.helm-redis-headless.bjtg.svc.cluster.local - - name: REDIS_MASTER_PORT_NUMBER - value: "6379" - - name: ALLOW_EMPTY_PASSWORD - value: "no" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_MASTER_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_TLS_ENABLED - value: "no" - - name: REDIS_PORT - value: "6379" - ports: - - name: redis - containerPort: 6379 - livenessProbe: - initialDelaySeconds: 20 - periodSeconds: 
5 - timeoutSeconds: 6 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_liveness_local_and_master.sh 5 - readinessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 2 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_readiness_local_and_master.sh 1 - resources: - limits: - cpu: "2" - memory: 8Gi - requests: - cpu: "100m" - memory: 1Gi - volumeMounts: - - name: start-scripts - mountPath: /opt/bitnami/scripts/start-scripts - - name: health - mountPath: /health - - name: redis-data - mountPath: /data - subPath: - - name: config - mountPath: /opt/bitnami/redis/mounted-etc - - name: redis-tmp-conf - mountPath: /opt/bitnami/redis/etc - volumes: - - name: start-scripts - configMap: - name: helm-redis-scripts - defaultMode: 0755 - - name: health - configMap: - name: helm-redis-health - defaultMode: 0755 - - name: config - configMap: - name: helm-redis-configuration - - name: redis-tmp-conf - emptyDir: { } - - name: redis-data - emptyDir: { } - ---- -apiVersion: v1 -kind: ServiceAccount -automountServiceAccountToken: true -metadata: - name: helm-redis - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus ---- -apiVersion: v1 -kind: Secret -metadata: - name: helm-redis - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -type: Opaque -data: - redis-password: "TWNhY2hlQDQ1MjI=" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-configuration - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - redis.conf: |- - # User-supplied common configuration: - # Enable AOF 
https://redis.io/topics/persistence#append-only-file - appendonly yes - # Disable RDB persistence, AOF persistence already enabled. - save "" - # End of common configuration - master.conf: |- - dir /data - # User-supplied master configuration: - rename-command FLUSHDB "" - rename-command FLUSHALL "" - # End of master configuration - replica.conf: |- - dir /data - slave-read-only yes - # User-supplied replica configuration: - rename-command FLUSHDB "" - rename-command FLUSHALL "" - # End of replica configuration ---- -# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-health - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - ping_readiness_local.sh: |- - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h localhost \ - -p $REDIS_PORT \ - ping - ) - if [ "$response" != "PONG" ]; then - echo "$response" - exit 1 - fi - ping_liveness_local.sh: |- - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h localhost \ - -p $REDIS_PORT \ - ping - ) - if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then - echo "$response" - exit 1 - fi - ping_readiness_master.sh: |- - #!/bin/bash - - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h $REDIS_MASTER_HOST \ - -p 
$REDIS_MASTER_PORT_NUMBER \ - ping - ) - if [ "$response" != "PONG" ]; then - echo "$response" - exit 1 - fi - ping_liveness_master.sh: |- - #!/bin/bash - - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" - response=$( - timeout -s 3 $1 \ - redis-cli \ - -h $REDIS_MASTER_HOST \ - -p $REDIS_MASTER_PORT_NUMBER \ - ping - ) - if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then - echo "$response" - exit 1 - fi - ping_readiness_local_and_master.sh: |- - script_dir="$(dirname "$0")" - exit_status=0 - "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? - "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? - exit $exit_status - ping_liveness_local_and_master.sh: |- - script_dir="$(dirname "$0")" - exit_status=0 - "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? - "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? - exit $exit_status ---- -# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: helm-redis-scripts - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -data: - start-master.sh: | - #!/bin/bash - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then - cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf - fi - if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then - cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf - fi - ARGS=("--port" "${REDIS_PORT}") - ARGS+=("--requirepass" "${REDIS_PASSWORD}") - ARGS+=("--masterauth" "${REDIS_PASSWORD}") - ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") - ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") - exec redis-server "${ARGS[@]}" - start-replica.sh: | - #!/bin/bash - - get_port() { - hostname="$1" - type="$2" - - port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") - port=${!port_var} - - if [ -z "$port" ]; then - case $type in - "SENTINEL") - echo 26379 - ;; - "REDIS") - echo 6379 - ;; - esac - else - echo $port - fi - } - - get_full_hostname() { - hostname="$1" - echo "${hostname}.${HEADLESS_SERVICE}" - } - - REDISPORT=$(get_port "$HOSTNAME" "REDIS") - - [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" - if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then - cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf - fi - if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then - cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf - fi - - echo "" >> /opt/bitnami/redis/etc/replica.conf - echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf - echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf - ARGS=("--port" "${REDIS_PORT}") - ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") - ARGS+=("--requirepass" "${REDIS_PASSWORD}") - ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") - ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") - ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") - exec redis-server "${ARGS[@]}" ---- -# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml -apiVersion: v1 -kind: Service -metadata: - name: helm-redis-headless - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus -spec: - type: ClusterIP - clusterIP: None - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg ---- -# Source: outside-deploy/charts/redis-db/templates/master/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: helm-redis-master - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master -spec: - type: ClusterIP - - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - nodePort: null - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master ---- -# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml -apiVersion: v1 -kind: Service 
-metadata: - name: helm-redis-replicas - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica -spec: - type: ClusterIP - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - nodePort: null - selector: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - app.kubernetes.io/component: replica ---- -# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-redis-master - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master - serviceName: helm-redis-headless - updateStrategy: - rollingUpdate: { } - type: RollingUpdate - template: - metadata: - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - cmii.type: middleware - cmii.app: redis - app.kubernetes.io/component: master - annotations: - checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 - checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 - checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 - checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d - spec: - affinity: { } - securityContext: - fsGroup: 1001 - serviceAccountName: helm-redis - - terminationGracePeriodSeconds: 30 - containers: - - name: redis - image: 10.250.0.110:8033/cmii/redis:6.2.6-debian-10-r0 - 
imagePullPolicy: "Always" - securityContext: - runAsUser: 1001 - command: - - /bin/bash - args: - - -c - - /opt/bitnami/scripts/start-scripts/start-master.sh - env: - - name: BITNAMI_DEBUG - value: "false" - - name: REDIS_REPLICATION_MODE - value: master - - name: ALLOW_EMPTY_PASSWORD - value: "no" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_TLS_ENABLED - value: "no" - - name: REDIS_PORT - value: "6379" - ports: - - name: redis - containerPort: 6379 - livenessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - # One second longer than command timeout should prevent generation of zombie processes. - timeoutSeconds: 6 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_liveness_local.sh 5 - readinessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 2 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_readiness_local.sh 1 - resources: - limits: - cpu: "2" - memory: 8Gi - requests: - cpu: "100m" - memory: 1Gi - volumeMounts: - - name: start-scripts - mountPath: /opt/bitnami/scripts/start-scripts - - name: health - mountPath: /health - - name: redis-data - mountPath: /data - subPath: - - name: config - mountPath: /opt/bitnami/redis/mounted-etc - - name: redis-tmp-conf - mountPath: /opt/bitnami/redis/etc/ - - name: tmp - mountPath: /tmp - volumes: - - name: start-scripts - configMap: - name: helm-redis-scripts - defaultMode: 0755 - - name: health - configMap: - name: helm-redis-health - defaultMode: 0755 - - name: config - configMap: - name: helm-redis-configuration - - name: redis-tmp-conf - emptyDir: { } - - name: tmp - emptyDir: { } - - name: redis-data - emptyDir: { } ---- -# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: helm-redis-replicas - namespace: bjtg - labels: - app.kubernetes.io/name: redis-db - 
octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: redis-db - app.kubernetes.io/release: bjtg - app.kubernetes.io/component: replica - serviceName: helm-redis-headless - updateStrategy: - rollingUpdate: { } - type: RollingUpdate - template: - metadata: - labels: - app.kubernetes.io/name: redis-db - octopus.control: redis-db-wdd - app.kubernetes.io/release: bjtg - app.kubernetes.io/managed-by: octopus - app.kubernetes.io/component: replica - annotations: - checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 - checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 - checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 - checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d - spec: - securityContext: - fsGroup: 1001 - serviceAccountName: helm-redis - terminationGracePeriodSeconds: 30 - containers: - - name: redis - image: 10.250.0.110:8033/cmii/redis:6.2.6-debian-10-r0 - imagePullPolicy: "Always" - securityContext: - runAsUser: 1001 - command: - - /bin/bash - args: - - -c - - /opt/bitnami/scripts/start-scripts/start-replica.sh - env: - - name: BITNAMI_DEBUG - value: "false" - - name: REDIS_REPLICATION_MODE - value: slave - - name: REDIS_MASTER_HOST - value: helm-redis-master-0.helm-redis-headless.bjtg.svc.cluster.local - - name: REDIS_MASTER_PORT_NUMBER - value: "6379" - - name: ALLOW_EMPTY_PASSWORD - value: "no" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_MASTER_PASSWORD - valueFrom: - secretKeyRef: - name: helm-redis - key: redis-password - - name: REDIS_TLS_ENABLED - value: "no" - - name: REDIS_PORT - value: "6379" - ports: - - name: redis - containerPort: 6379 - livenessProbe: - initialDelaySeconds: 20 - periodSeconds: 
5 - timeoutSeconds: 6 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_liveness_local_and_master.sh 5 - readinessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 2 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_readiness_local_and_master.sh 1 - resources: - limits: - cpu: "2" - memory: 8Gi - requests: - cpu: "100m" - memory: 1Gi - volumeMounts: - - name: start-scripts - mountPath: /opt/bitnami/scripts/start-scripts - - name: health - mountPath: /health - - name: redis-data - mountPath: /data - subPath: - - name: config - mountPath: /opt/bitnami/redis/mounted-etc - - name: redis-tmp-conf - mountPath: /opt/bitnami/redis/etc - volumes: - - name: start-scripts - configMap: - name: helm-redis-scripts - defaultMode: 0755 - - name: health - configMap: - name: helm-redis-health - defaultMode: 0755 - - name: config - configMap: - name: helm-redis-configuration - - name: redis-tmp-conf - emptyDir: { } - - name: redis-data - emptyDir: { } - diff --git a/agent-operator/image/CmiiImageOperator.go b/agent-operator/image/CmiiImageOperator.go index 155e242..e2a38e5 100755 --- a/agent-operator/image/CmiiImageOperator.go +++ b/agent-operator/image/CmiiImageOperator.go @@ -13,6 +13,7 @@ import ( "io" "io/fs" "os" + "path/filepath" "regexp" "strconv" "strings" @@ -190,9 +191,9 @@ func UploadToOctopusKindHarbor(targetImageName string) (pushResult io.ReadCloser } pushResult, err := apiClient.ImagePush(context.TODO(), targetImageName, types.ImagePushOptions{ - All: false, - //RegistryAuth: "eyAidXNlcm5hbWUiOiAiYWRtaW4iLCAicGFzc3dvcmQiOiAiVjJyeVN0ckBuZ1BzcyIsICJlbWFpbCI6ICJpY2VAcXEuY29tIiB9Cg==", - RegistryAuth: "eyAidXNlcm5hbWUiOiAiZGljdHp4IiwgInBhc3N3b3JkIjogIjlAcDNoekdWU0JscyVIWXUiLCAiZW1haWwiOiAiaWNlQHFxLmNvbSIgfQ==", + All: false, + RegistryAuth: "eyAidXNlcm5hbWUiOiAiYWRtaW4iLCAicGFzc3dvcmQiOiAiVjJyeVN0ckBuZ1BzcyIsICJlbWFpbCI6ICJpY2VAcXEuY29tIiB9Cg==", + //RegistryAuth: 
"eyAidXNlcm5hbWUiOiAiZGljdHp4IiwgInBhc3N3b3JkIjogIjlAcDNoekdWU0JscyVIWXUiLCAiZW1haWwiOiAiaWNlQHFxLmNvbSIgfQ==", PrivilegeFunc: nil, Platform: "amd64", }) @@ -248,6 +249,7 @@ func TagFromListAndPushToCHarbor(referenceImageList []string, targetHarborHost s //fmt.Println(scanner.Text()) } log.InfoF("[ImageTagFromListAndPushToCHarbor] - push of %s success!", targetImageName) + fmt.Println() } else { errorPushImageNameList = append(errorPushImageNameList, cmiiImageFullName) } @@ -328,13 +330,13 @@ func PullFromFullNameList(fullImageNameList []string) (errorPullImageList []stri } scanner := bufio.NewScanner(pullResult) for scanner.Scan() { - //line := scanner.Text() + line := scanner.Text() //if strings.Contains(line, "\"status\":\"Pulling from") { // fmt.Println(line) //} - //if strings.Contains(line, "Status: Image is up to date for") { - // fmt.Println(line) - //} + if strings.Contains(line, "Status: Image is up to date for") { + fmt.Println(line) + } } fmt.Println() } @@ -444,18 +446,22 @@ func SaveToGzipFile(imageFullName, folderPathPrefix string) (gzipOK bool, gzipIm } gzipImageFileFullPath = image2.ImageFullNameToGzipFileName(realImageTag) - if !strings.HasSuffix(folderPathPrefix, "/") { - folderPathPrefix += "/" + + if err := os.MkdirAll(filepath.Dir(gzipImageFileFullPath), os.ModePerm); err != nil { + log.ErrorF("[ImageSaveToTarGZ] - failed to create directory: %s", err) + return false, "" } - _ = os.MkdirAll(folderPathPrefix, os.ModeDir) // 生成gzip压缩文件的全路径名称 - gzipImageFileFullPath = folderPathPrefix + gzipImageFileFullPath + gzipImageFileFullPath = filepath.Join(folderPathPrefix, gzipImageFileFullPath) log.InfoF("[ImageSaveToTarGZ] - start to save [%s] to [%s]", realImageTag, gzipImageFileFullPath) // 删除掉旧的Gzip文件 - _ = os.Remove(gzipImageFileFullPath) + if err := os.Remove(gzipImageFileFullPath); err != nil && !os.IsNotExist(err) { + log.ErrorF("[ImageSaveToTarGZ] - failed to remove old gzip file: %s", err) + return false, "" + } // 创建 tarFile, err := 
os.Create(gzipImageFileFullPath) @@ -482,6 +488,73 @@ func SaveToGzipFile(imageFullName, folderPathPrefix string) (gzipOK bool, gzipIm return true, gzipImageFileFullPath } +// SaveImageListToGzipFile 将一个列表内的镜像全部压缩为一个tar.gz文件 +func SaveImageListToGzipFile(imageFullNames []string, folderPathPrefix string, outputFileName string) (gzipOK bool, gzipFileFullPath string, errorGzipImageList []string) { + + //if len(imageFullNames) == 0 { + // log.Error("[SaveImagesToGzipFile] - no images provided") + // return false, "", errorGzipImageList + //} + // + //// 确保输出文件路径 + //gzipFileFullPath = folderPathPrefix + outputFileName + //if err := os.MkdirAll(filepath.Dir(gzipFileFullPath), os.ModePerm); err != nil { + // log.ErrorF("[SaveImagesToGzipFile] - failed to create directory: %s", err) + // return false, "", errorGzipImageList + //} + // + //log.InfoF("[SaveImagesToGzipFile] - start saving images to [%s]", gzipFileFullPath) + // + //// 删除旧的Gzip文件 + //if err := os.Remove(gzipFileFullPath); err != nil && !os.IsNotExist(err) { + // log.ErrorF("[SaveImagesToGzipFile] - failed to remove old gzip file: %s", err) + // return false, "", errorGzipImageList + //} + // + //tarFile, err := os.Create(gzipFileFullPath) + //if err != nil { + // log.ErrorF("[SaveImagesToGzipFile] - error creating gzip file: %s", err) + // return false, "", errorGzipImageList + //} + //defer tarFile.Close() + // + //gw, err := pgzip.NewWriterLevel(tarFile, pgzip.DefaultCompression) + //if err != nil { + // log.ErrorF("[SaveImagesToGzipFile] - pgzip writer creation error: %s", err) + // return false, "", errorGzipImageList + //} + //defer gw.Close() + // + //for _, imageFullName := range imageFullNames { + // imageGetByName := GetByName(imageFullName) + // if imageGetByName == nil { + // log.WarnF("[SaveImagesToGzipFile] - %s not exists, skipping", imageFullName) + // continue + // } + // + // imageSaveTarStream, err := apiClient.ImageSave(context.TODO(), imageGetByName.RepoTags) + // if err != nil { + // 
log.ErrorF("[SaveImagesToGzipFile] - image save error for %s: %s", imageFullName, err) + // continue + // } + // + // if _, err := io.Copy(gw, imageSaveTarStream); err != nil { + // log.ErrorF("[SaveImagesToGzipFile] - failed to copy tar archive for %s to gzip writer: %s", imageFullName, err) + // continue + // } + // + //} + // + //if err := gw.Close(); err != nil { + // log.ErrorF("[SaveImagesToGzipFile] - error closing gzip writer: %s", err) + // return false, "", errorGzipImageList + //} + // + //log.InfoF("[SaveImagesToGzipFile] - successfully saved images to [%s]", gzipFileFullPath) + return true, gzipFileFullPath, errorGzipImageList + +} + func CmiiImageMapToFullNameList(cmiiImageVersionMap map[string]string) (fullImageNameList []string) { for image, tag := range cmiiImageVersionMap { @@ -509,27 +582,6 @@ func CmiiImageMapFromGzipFolder(gzipFileFolder string) (cmiiImageVersionMap map[ return cmiiImageVersionMap } -func FrontendBackendSrsImageMapFromCmiiImageMap(cmiiImageVersionMap map[string]string) (frontendImageVersionMap, backendImageVersionMap, srsImageVersionMap map[string]string) { - - frontendImageVersionMap = make(map[string]string) - backendImageVersionMap = make(map[string]string) - srsImageVersionMap = make(map[string]string) - - for imageName, imageTag := range cmiiImageVersionMap { - if strings.Contains(imageName, "platform") { - frontendImageVersionMap[imageName] = imageTag - } else if strings.Contains(imageName, "srs") { - srsImageVersionMap[imageName] = imageTag - } else if strings.Contains(imageName, "operator") { - srsImageVersionMap[imageName] = imageTag - } else { - backendImageVersionMap[imageName] = imageTag - } - } - - return frontendImageVersionMap, backendImageVersionMap, srsImageVersionMap -} - // GenerateCmiiTagVersionImageMap 生成特定版本的ImageTagMap func GenerateCmiiTagVersionImageMap(specificTag string) (backendMap, frontendMap, srsMap map[string]string) { matched, _ := regexp.MatchString(`^\d+\.\d+\.\d+$`, specificTag) @@ -543,10 
+595,10 @@ func GenerateCmiiTagVersionImageMap(specificTag string) (backendMap, frontendMap frontendMap = make(map[string]string, len(d_app.CmiiFrontendAppMap)) srsMap = make(map[string]string, len(d_app.CmiiSrsAppMap)) - for imageName, _ := range d_app.CmiiBackendAppMap { + for imageName := range d_app.CmiiBackendAppMap { backendMap[imageName] = specificTag } - for imageName, _ := range d_app.CmiiFrontendAppMap { + for imageName := range d_app.CmiiFrontendAppMap { frontendMap[imageName] = specificTag } for imageName, imageTag := range d_app.CmiiSrsAppMap { diff --git a/agent-operator/image/CmiiImageOperator_test.go b/agent-operator/image/CmiiImageOperator_test.go index 9282ad0..ea84c6c 100755 --- a/agent-operator/image/CmiiImageOperator_test.go +++ b/agent-operator/image/CmiiImageOperator_test.go @@ -9,7 +9,6 @@ import ( "wdd.io/agent-common/image" "wdd.io/agent-common/utils" "wdd.io/agent-deploy/d_app" - "wdd.io/agent-operator/real_project/zjjt" ) func TestGetRunningContainer(t *testing.T) { @@ -205,13 +204,6 @@ func TestConvertCmiiImageMapFromGzipFolder(t *testing.T) { utils.BeautifulPrint(versionMap) } -func TestFrontendBackendImageMapFromCmiiImageMap(t *testing.T) { - frontendImageVersionMap, backendImageVersionMap, _ := FrontendBackendSrsImageMapFromCmiiImageMap(zjjt.CmiiImageMap) - - utils.BeautifulPrint(frontendImageVersionMap) - utils.BeautifulPrint(backendImageVersionMap) -} - func TestImageNameToTargetImageFullName(t *testing.T) { AllCmiiImageTagList := []string{ @@ -291,3 +283,20 @@ func TestImageNameToTargetImageFullName(t *testing.T) { utils.BeautifulPrint(result) } + +func TestSaveImageListToGzipFile(t *testing.T) { + + allImageList := []string{ + "harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:5.7.0", + "harbor.cdcyy.com.cn/cmii/cmii-iam-gateway:5.6.0", + "harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:5.5.0", + } + + gzipOK, gzipFileFullPath, errorGzipImageList := SaveImageListToGzipFile(allImageList, 
"/home/wdd/IdeaProjects/ProjectOctopus/agent-operator/image", "test.tar.gz") + + assert.Equal(t, gzipOK, true, "gzip image list to single file failed !") + + log.Info("gzip file is " + gzipFileFullPath) + + utils.BeautifulPrint(errorGzipImageList) +} diff --git a/agent-operator/image/HarborOperator_test.go b/agent-operator/image/HarborOperator_test.go index e8805fa..55c9991 100755 --- a/agent-operator/image/HarborOperator_test.go +++ b/agent-operator/image/HarborOperator_test.go @@ -9,7 +9,7 @@ import ( var DefaultHarborOperator *HarborOperator -func TestHarborOperator_BuildOperator(t *testing.T) { +func TestHarborOperator_BuildOperator_CMII(t *testing.T) { harborOperator := &HarborOperator{ HarborHost: "http://harbor.cdcyy.com.cn", HarborPort: "", @@ -28,6 +28,25 @@ func TestHarborOperator_BuildOperator(t *testing.T) { } +func TestHarborOperator_BuildOperator(t *testing.T) { + harborOperator := &HarborOperator{ + HarborHost: "http://harbor.wdd.io", + HarborPort: "8033", + HarborUser: "admin", + HarborPass: "V2ryStr@ngPss", + HarborClient: nil, + } + + _, err := harborOperator.BuildOperator() + if err != nil { + t.Logf("error is %s", err.Error()) + return + } + + DefaultHarborOperator = harborOperator + +} + func TestHarborOperator_RepoListAll(t *testing.T) { TestHarborOperator_BuildOperator(t) @@ -67,7 +86,7 @@ func TestHarborOperator_ArtifactListAll(t *testing.T) { } func TestHarborOperator_CmiiHarborCleanUp(t *testing.T) { - TestHarborOperator_BuildOperator(t) + TestHarborOperator_BuildOperator_CMII(t) //repoListAll := DefaultHarborOperator.RepoListAll("ran") repoListAll := DefaultHarborOperator.RepoListAll("cmii") @@ -116,15 +135,15 @@ func TestHarborOperator_ArtifactDeleteOne(t *testing.T) { } func TestHarborOperator_CmiiTagFilter(t *testing.T) { - TestHarborOperator_BuildOperator(t) - imageMap := DefaultHarborOperator.CmiiTagFilter("4") + TestHarborOperator_BuildOperator_CMII(t) + imageMap := DefaultHarborOperator.CmiiTagFilter("5.7") 
utils.BeautifulPrint(imageMap) } func TestHarborOperator_ArtifactDeleteFromNameTagList(t *testing.T) { TestHarborOperator_BuildOperator(t) - allCmiiImageList := DefaultHarborOperator.CmiiTagFilter("4") + allCmiiImageList := DefaultHarborOperator.CmiiTagFilter("5.5") errorDeleteList := DefaultHarborOperator.ArtifactDeleteFromNameTagList("cmii", allCmiiImageList) utils.BeautifulPrint(errorDeleteList) } diff --git a/agent-operator/log/cmii-update-log.txt b/agent-operator/log/cmii-update-log.txt index 37e261a..b090cca 100755 --- a/agent-operator/log/cmii-update-log.txt +++ b/agent-operator/log/cmii-update-log.txt @@ -106,3 +106,112 @@ 2024-07-12-11-40-00 uavcloud-demo cmii-uav-industrial-portfolio 5.6.0-071201 5.6.0-071202 2024-07-15-10-05-51 uavcloud-demo cmii-uav-industrial-portfolio 5.6.0-071206 5.6.0-071501 2024-07-16-17-06-59 uavcloud-demo cmii-uav-industrial-portfolio 5.6.0-071501 5.6.0-071601 +2024-07-23-09-35-40 uavcloud-demo cmii-uas-lifecycle 5.7.0-snapshot 5.7.0-30403-072301 +2024-07-23-10-53-00 uavcloud-demo cmii-uav-platform 5.7.0 5.7.0-29267-072301 +2024-07-23-13-43-35 uavcloud-demo cmii-uav-industrial-portfolio 5.7.0-31369-yunnan-0723 5.7.0-31369-yunnan-072301 +2024-07-23-15-18-56 uavcloud-demo cmii-uav-platform 5.7.0-29267-072301 5.7.0 +2024-07-17-14-37-11 uavcloud-demo cmii-uav-industrial-portfolio 5.6.0-071601 5.6.0-071701 +2024-07-17-17-45-00 uavcloud-demo cmii-uav-platform 5.6.0-29267-0717 5.6.0-071701 +2024-07-17-17-45-09 uavcloud-demo cmii-uas-lifecycle 5.6.0 5.6.0-30403-071701 +2024-07-17-17-48-00 uavcloud-demo cmii-uav-platform 5.6.0-071701 5.6.0-071702 +2024-07-18-10-05-00 uavcloud-demo cmii-uas-lifecycle 5.6.0-30403-071701 5.6.0-30403-071801 +2024-07-18-17-17-58 uavcloud-demo cmii-uav-emergency 5.6.0-0704 5.7.0 +2024-07-18-17-17-59 uavcloud-demo cmii-uav-gis-server 5.6.0 5.7.0 +2024-07-18-17-18-00 uavcloud-demo cmii-uav-sense-adapter 5.6.0-0716 5.7.0 +2024-07-18-17-18-02 uavcloud-demo cmii-open-gateway 5.6.0 5.7.0 +2024-07-18-17-18-03 
uavcloud-demo cmii-uav-cloud-live 5.6.0 5.7.0 +2024-07-18-17-18-04 uavcloud-demo cmii-uav-mission 5.5.0-30015-061801 5.7.0 +2024-07-18-17-18-06 uavcloud-demo cmii-uav-mqtthandler 5.6.0-30067-071604 5.7.0 +2024-07-18-17-18-07 uavcloud-demo cmii-uav-alarm 5.6.0 5.7.0 +2024-07-18-17-18-08 uavcloud-demo cmii-uav-material-warehouse 5.6.0-062602 5.7.0 +2024-07-18-17-18-10 uavcloud-demo cmii-uav-integration 5.7.0-30015-29835-071601 5.7.0 +2024-07-18-17-18-11 uavcloud-demo cmii-suav-supervision 5.6.0 5.7.0 +2024-07-18-17-18-12 uavcloud-demo cmii-uav-airspace 5.6.0-0704 5.7.0 +2024-07-18-17-18-14 uavcloud-demo cmii-uav-logger 5.6.0 5.7.0 +2024-07-18-17-18-16 uavcloud-demo cmii-uav-threedsimulation 5.5.0 5.7.0 +2024-07-18-17-18-18 uavcloud-demo cmii-admin-data 5.6.0 5.7.0 +2024-07-18-17-18-19 uavcloud-demo cmii-uav-industrial-portfolio 5.6.0-071701 5.7.0 +2024-07-18-17-18-20 uavcloud-demo cmii-uav-process 5.6.0-060601 5.7.0 +2024-07-18-17-18-22 uavcloud-demo cmii-uav-surveillance 5.6.0-30015-070801 5.7.0 +2024-07-18-17-18-23 uavcloud-demo cmii-uav-user 5.6.0-0704 5.7.0 +2024-07-18-17-18-24 uavcloud-demo cmii-uav-developer 5.6.0-0708 5.7.0 +2024-07-18-17-18-26 uavcloud-demo cmii-uav-data-post-process 5.6.0-062401 5.7.0 +2024-07-18-17-18-27 uavcloud-demo cmii-admin-gateway 5.6.0 5.7.0 +2024-07-18-17-18-29 uavcloud-demo cmii-uav-gateway 5.6.0-061202 5.7.0 +2024-07-18-17-18-30 uavcloud-demo cmii-uav-waypoint 5.6.0 5.7.0 +2024-07-18-17-18-31 uavcloud-demo cmii-admin-user 5.6.0 5.7.0 +2024-07-18-17-18-33 uavcloud-demo cmii-uav-cms 5.5.0 5.7.0 +2024-07-18-17-18-34 uavcloud-demo cmii-uav-device 5.6.0-0715 5.7.0 +2024-07-18-17-18-36 uavcloud-demo cmii-uav-notice 5.6.0 5.7.0 +2024-07-18-17-18-37 uavcloud-demo cmii-uav-oauth 5.6.0-0704 5.7.0 +2024-07-18-17-18-38 uavcloud-demo cmii-uav-tower 5.6.0-062601 5.7.0 +2024-07-18-17-18-40 uavcloud-demo cmii-uav-multilink 5.5.0 5.7.0 +2024-07-18-17-18-41 uavcloud-demo cmii-uav-brain 5.5.0 5.7.0 +2024-07-18-17-20-49 uavcloud-demo 
cmii-suav-platform-supervisionh5 5.6.0 5.7.0 +2024-07-18-17-20-51 uavcloud-demo cmii-uav-platform-ai-brain 5.6.0 5.7.0 +2024-07-18-17-20-52 uavcloud-demo cmii-uav-platform-cms-portal 5.6.0 5.7.0 +2024-07-18-17-20-53 uavcloud-demo cmii-uav-platform-open 5.6.0-0704 5.7.0 +2024-07-18-17-20-55 uavcloud-demo cmii-uav-platform-share 5.6.0 5.7.0 +2024-07-18-17-20-56 uavcloud-demo cmii-suav-platform-supervision 5.6.0-0708 5.7.0 +2024-07-18-17-20-57 uavcloud-demo cmii-uav-platform 5.6.0-071702 5.7.0 +2024-07-18-17-20-58 uavcloud-demo cmii-uav-platform-armypeople 5.6.0-28028-071102 5.7.0 +2024-07-18-17-21-00 uavcloud-demo cmii-uav-platform-media 5.6.0-0710 5.7.0 +2024-07-18-17-21-02 uavcloud-demo cmii-uav-platform-mws 5.6.0 5.7.0 +2024-07-18-17-21-04 uavcloud-demo cmii-uav-platform-oms 5.6.0 5.7.0 +2024-07-18-17-21-05 uavcloud-demo cmii-uav-platform-securityh5 5.6.0 5.7.0 +2024-07-18-17-26-40 uavcloud-demo cmii-uav-brain 5.7.0 5.5.0 +2024-07-18-17-28-26 uavcloud-demo cmii-uav-multilink 5.7.0 5.5.0 +2024-07-18-17-35-01 uavcloud-demo cmii-uas-lifecycle 5.6.0-30403-071801 5.6.0-30403-071802 +2024-07-18-17-40-02 uavcloud-demo cmii-uas-lifecycle 5.6.0-30403-071802 5.6.0-30403-071801 +2024-07-18-18-24-25 uavcloud-demo cmii-admin-data 5.7.0 +2024-07-18-18-24-26 uavcloud-demo cmii-uav-gateway 5.7.0 +2024-07-18-18-24-27 uavcloud-demo cmii-uav-tower 5.7.0 +2024-07-18-18-24-29 uavcloud-demo cmii-uav-user 5.7.0 +2024-07-18-18-24-30 uavcloud-demo cmii-open-gateway 5.7.0 +2024-07-18-18-24-31 uavcloud-demo cmii-uav-data-post-process 5.7.0 +2024-07-18-18-24-32 uavcloud-demo cmii-uav-oauth 5.7.0 +2024-07-18-18-24-34 uavcloud-demo cmii-uav-sense-adapter 5.7.0 +2024-07-18-18-24-35 uavcloud-demo cmii-admin-gateway 5.7.0 +2024-07-18-18-24-36 uavcloud-demo cmii-admin-user 5.7.0 +2024-07-18-18-24-38 uavcloud-demo cmii-uav-alarm 5.7.0 +2024-07-18-18-24-40 uavcloud-demo cmii-uav-mission 5.7.0 +2024-07-18-18-24-41 uavcloud-demo cmii-uav-notice 5.7.0 +2024-07-18-18-24-42 uavcloud-demo 
cmii-uav-multilink 5.7.0 +2024-07-18-18-24-43 uavcloud-demo cmii-uav-brain 5.7.0 +2024-07-18-18-24-45 uavcloud-demo cmii-uav-developer 5.7.0 +2024-07-18-18-24-46 uavcloud-demo cmii-uav-mqtthandler 5.7.0 +2024-07-18-18-24-48 uavcloud-demo cmii-uav-process 5.7.0 +2024-07-18-18-24-49 uavcloud-demo cmii-uav-threedsimulation 5.7.0 +2024-07-18-18-24-50 uavcloud-demo cmii-uav-waypoint 5.7.0 +2024-07-18-18-24-54 uavcloud-demo cmii-uav-airspace 5.7.0 +2024-07-18-18-24-56 uavcloud-demo cmii-uav-material-warehouse 5.7.0 +2024-07-18-18-24-58 uavcloud-demo cmii-suav-supervision 5.7.0 +2024-07-18-18-25-00 uavcloud-demo cmii-uav-cms 5.7.0 +2024-07-18-18-25-03 uavcloud-demo cmii-uav-emergency 5.7.0 +2024-07-18-18-25-08 uavcloud-demo cmii-uav-gis-server 5.7.0 +2024-07-18-18-25-14 uavcloud-demo cmii-uav-surveillance 5.7.0 +2024-07-18-18-25-16 uavcloud-demo cmii-uav-cloud-live 5.7.0 +2024-07-18-18-25-18 uavcloud-demo cmii-uav-device 5.7.0 +2024-07-18-18-25-20 uavcloud-demo cmii-uav-industrial-portfolio 5.7.0 +2024-07-18-18-25-21 uavcloud-demo cmii-uav-integration 5.7.0 +2024-07-18-18-25-23 uavcloud-demo cmii-uav-logger 5.7.0 +2024-07-18-18-25-25 uavcloud-demo cmii-uav-platform-oms 5.7.0 +2024-07-18-18-25-27 uavcloud-demo cmii-uav-platform-open 5.7.0 +2024-07-18-18-25-28 uavcloud-demo cmii-uav-platform-securityh5 5.7.0 +2024-07-18-18-25-29 uavcloud-demo cmii-suav-platform-supervision 5.7.0 +2024-07-18-18-25-31 uavcloud-demo cmii-uav-platform-ai-brain 5.7.0 +2024-07-18-18-25-32 uavcloud-demo cmii-uav-platform-armypeople 5.7.0 +2024-07-18-18-25-34 uavcloud-demo cmii-uav-platform-media 5.7.0 +2024-07-18-18-25-37 uavcloud-demo cmii-uav-platform-mws 5.7.0 +2024-07-18-18-25-39 uavcloud-demo cmii-suav-platform-supervisionh5 5.7.0 +2024-07-18-18-25-40 uavcloud-demo cmii-uav-platform 5.7.0 +2024-07-18-18-25-42 uavcloud-demo cmii-uav-platform-cms-portal 5.7.0 +2024-07-18-18-25-43 uavcloud-demo cmii-uav-platform-share 5.7.0 +2024-07-18-18-28-00 uavcloud-demo cmii-uav-multilink 5.5.0 
+2024-07-18-18-30-21 uavcloud-demo cmii-uav-brain 5.5.0 +2024-07-19-09-29-48 uavcloud-demo cmii-uav-platform-armypeople 5.7.0 5.7.0-29668-071901 +2024-07-19-09-54-00 uavcloud-demo cmii-uav-platform-armypeople 5.7.0-29668-071901 5.7.0 +2024-08-20-17-36-40 uavcloud-demo cmii-uav-industrial-portfolio 5.7.0-31369-yunnan-081901 5.7.0-31369-yunnan-082001 +2024-08-21-11-03-00 uavcloud-demo cmii-uav-industrial-portfolio 5.7.0-31369-yunnan-082001 5.7.0-31369-yunnan-082101 +2024-08-21-14-50-50 uavcloud-demo cmii-uav-industrial-portfolio 5.7.0-31369-yunnan-082101 5.7.0-31369-yunnan-082201 +2024-08-21-14-55-15 uavcloud-demo cmii-uav-industrial-portfolio 5.7.0-31369-yunnan-082201 5.7.0-31369-yunnan-082102 diff --git a/agent-operator/main.go b/agent-operator/main.go index bbd9499..838d9c7 100755 --- a/agent-operator/main.go +++ b/agent-operator/main.go @@ -15,8 +15,6 @@ var LocalKubeConfigFile = "/root/.kube/config" // C:\Users\wddsh\go\bin\gox.exe -osarch="linux/amd64" -output "build/agent-operator_{{.OS}}_{{.Arch}}" // C:\Users\wddsh\go\bin\gox.exe -osarch="linux/amd64 linux/arm64" -output "build/agent-operator_{{.OS}}_{{.Arch}}" -// - func BuildDefaultK8sOperator() { // build from local LocalKubeConfigFile diff --git a/agent-operator/real_project/bgtg/Config.go b/agent-operator/real_project/bgtg/Config.go deleted file mode 100755 index 8aafd58..0000000 --- a/agent-operator/real_project/bgtg/Config.go +++ /dev/null @@ -1,71 +0,0 @@ -package bgtg - -var AllCmiiImageTagList = []string{ - "cmii-uav-tower:5.4.0-0319", - "cmii-uav-platform-logistics:5.4.0", - "cmii-uav-platform-qinghaitourism:4.1.0-21377-0508", - "cmii-uav-platform-securityh5:5.4.0", - "cmii-uav-platform:5.4.0-25263-041102", - "cmii-uav-platform-ai-brain:5.4.0", - "cmii-uav-emergency:5.3.0", - "cmii-uav-kpi-monitor:5.4.0", - "cmii-uav-platform-splice:5.4.0-040301", - "cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427", - "cmii-live-operator:5.2.0", - "cmii-uav-gateway:5.4.0", - "cmii-uav-platform-security:4.1.6", - 
"cmii-uav-integration:5.4.0-25916", - "cmii-uav-notice:5.4.0", - "cmii-uav-platform-open:5.4.0", - "cmii-srs-oss-adaptor:2023-SA", - "cmii-admin-gateway:5.4.0", - "cmii-uav-process:5.4.0-0410", - "cmii-suav-supervision:5.4.0-032501", - "cmii-uav-platform-cms-portal:5.4.0", - "cmii-uav-platform-multiterminal:5.4.0", - "cmii-admin-data:5.4.0-0403", - "cmii-uav-cloud-live:5.4.0", - "cmii-uav-grid-datasource:5.2.0-24810", - "cmii-uav-platform-qingdao:4.1.6-24238-qingdao", - "cmii-admin-user:5.4.0", - "cmii-uav-industrial-portfolio:5.4.0-28027-041102", - "cmii-uav-alarm:5.4.0-0409", - "cmii-uav-clusters:5.2.0", - "cmii-uav-platform-oms:5.4.0", - "cmii-uav-platform-hljtt:5.3.0-hjltt", - "cmii-uav-platform-mws:5.4.0", - "cmii-uav-autowaypoint:4.1.6-cm", - "cmii-uav-grid-manage:5.1.0", - "cmii-uav-platform-share:5.4.0", - "cmii-uav-cms:5.3.0", - "cmii-uav-oauth:5.4.0-032901", - "cmii-open-gateway:5.4.0", - "cmii-uav-data-post-process:5.4.0", - "cmii-uav-multilink:5.4.0-032701", - "cmii-uav-platform-media:5.4.0", - "cmii-uav-platform-visualization:5.2.0", - "cmii-uav-platform-emergency-rescue:5.2.0", - "cmii-app-release:4.2.0-validation", - "cmii-uav-device:5.4.0-28028-0409", - "cmii-uav-gis-server:5.4.0", - "cmii-uav-brain:5.4.0", - "cmii-uav-depotautoreturn:5.4.0", - "cmii-uav-threedsimulation:5.1.0", - "cmii-uav-grid-engine:5.1.0", - "cmii-uav-developer:5.4.0-040701", - "cmii-uav-waypoint:5.4.0-032901", - "cmii-uav-platform-base:5.4.0", - "cmii-uav-platform-threedsimulation:5.2.0-21392", - "cmii-uav-platform-detection:5.4.0", - "cmii-uav-logger:5.4.0-0319", - "cmii-uav-platform-seniclive:5.2.0", - "cmii-suav-platform-supervisionh5:5.4.0", - "cmii-uav-user:5.4.0", - "cmii-uav-surveillance:5.4.0-28028-0409", - "cmii-uav-mission:5.4.0-28028-041006", - "cmii-uav-mqtthandler:5.4.0-25916-041001", - "cmii-uav-material-warehouse:5.4.0-0407", - "cmii-uav-platform-armypeople:5.4.0-041201", - "cmii-suav-platform-supervision:5.4.0", - "cmii-uav-airspace:5.4.0-0402", -} diff --git 
a/server/src/test/java/io/wdd/server/func/TestBaseFuncScheduler.java b/server/src/test/java/io/wdd/server/func/TestBaseFuncScheduler.java index 78a67c6..4e0e3c7 100755 --- a/server/src/test/java/io/wdd/server/func/TestBaseFuncScheduler.java +++ b/server/src/test/java/io/wdd/server/func/TestBaseFuncScheduler.java @@ -126,25 +126,25 @@ public class TestBaseFuncScheduler { throw new RuntimeException(e); } - baseFuncScheduler.runProcedure(projectDeployContext); +// baseFuncScheduler.runProcedure(projectDeployContext); // 只能支持 带端口的HarborHost 默认为8033Chengdu-amd64-110 -// harborFuncScheduler.runProcedure(projectDeployContext); + harborFuncScheduler.runProcedure(projectDeployContext); List appFunctionEnumList = List.of( -// AppFunctionEnum.DEPLOY_CHRONY_SERVER, -// AppFunctionEnum.DEPLOY_RKE + AppFunctionEnum.DEPLOY_CHRONY_SERVER, + AppFunctionEnum.DEPLOY_RKE // AppFunctionEnum.DEPLOY_K8S_DASHBOARD, // AppFunctionEnum.DEPLOY_NFS, // AppFunctionEnum.DEPLOY_TEST_NFS - AppFunctionEnum.DEPLOY_K8S_NAMESPACE, - AppFunctionEnum.DEPLOY_K8S_PVC, - AppFunctionEnum.DEPLOY_K8S_MYSQL, - AppFunctionEnum.DEPLOY_K8S_REDIS, - AppFunctionEnum.DEPLOY_K8S_MIDDLEWARES +// AppFunctionEnum.DEPLOY_K8S_NAMESPACE, +// AppFunctionEnum.DEPLOY_K8S_PVC, +// AppFunctionEnum.DEPLOY_K8S_MYSQL, +// AppFunctionEnum.DEPLOY_K8S_REDIS, +// AppFunctionEnum.DEPLOY_K8S_MIDDLEWARES // AppFunctionEnum.DEPLOY_INGRESS, // AppFunctionEnum.DEPLOY_FRONTEND,