diff --git a/agent-common/assert/MyAssert.go b/agent-common/assert/MyAssert.go
new file mode 100644
index 0000000..c778a46
--- /dev/null
+++ b/agent-common/assert/MyAssert.go
@@ -0,0 +1,68 @@
+package assert
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+var Asserter = NewAssert()
+
+// Assert is a utility type whose methods panic with the given message when an assertion fails
+type Assert struct{}
+
+// NewAssert returns a new instance of Assert
+func NewAssert() *Assert {
+	return &Assert{}
+}
+
+// NotEmpty checks if the given value is not empty
+func (a *Assert) NotEmpty(value interface{}, message string) {
+	if isEmptyValue(reflect.ValueOf(value)) {
+		panic(fmt.Sprintf("Assertion failed: %s", message))
+	}
+}
+
+// NotBlank checks if the given string is not blank (empty or whitespace-only)
+func (a *Assert) NotBlank(str string, message string) {
+	if strings.TrimSpace(str) == "" {
+		panic(fmt.Sprintf("Assertion failed: %s", message))
+	}
+}
+
+// Equals checks if two values are deeply equal
+func (a *Assert) Equals(expected, actual interface{}, message string) {
+	if !reflect.DeepEqual(expected, actual) {
+		panic(fmt.Sprintf("Assertion failed: %s. Expected '%v' but got '%v'", message, expected, actual))
+	}
+}
+
+// Nil checks if the given value is nil; note that a typed nil pointer stored in an interface{} compares non-nil here
+func (a *Assert) Nil(value interface{}, message string) {
+	if value != nil {
+		panic(fmt.Sprintf("Assertion failed: %s", message))
+	}
+}
+
+// NotNil checks if the given value is not nil
+func (a *Assert) NotNil(value interface{}, message string) {
+	if value == nil {
+		panic(fmt.Sprintf("Assertion failed: %s", message))
+	}
+}
+
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.IsNil() || v.Len() == 0
+	case reflect.Ptr:
+		if v.IsNil() {
+			return true
+		}
+		return isEmptyValue(v.Elem())
+	default:
+		return false
+	}
+}
diff --git a/agent-common/image/ImageNameConvert.go b/agent-common/image/ImageNameConvert.go
index 9035c0c..ca4eedc 100644
--- a/agent-common/image/ImageNameConvert.go
+++ b/agent-common/image/ImageNameConvert.go
@@ -133,6 +133,11 @@ func GzipFileNameToImageFullName(gzipFileName string) (imageFullName string) {
 	}
 	gzipFileName = strings.TrimSuffix(gzipFileName, ".tar.gz")
 
+	if strings.HasPrefix(gzipFileName, "docker=library") {
+		// e.g. docker=library=busybox=latest.tar.gz -> busybox:latest
+		return strings.Split(gzipFileName, "=")[2] + ":" + strings.Split(gzipFileName, "=")[3]
+	}
+
 	if strings.HasPrefix(gzipFileName, "docker") {
 		return strings.Split(gzipFileName, "=")[1] + "/" + strings.Split(gzipFileName, "=")[2] + ":" + strings.Split(gzipFileName, "=")[3]
 	}
diff --git a/agent-common/utils/FileUtils.go b/agent-common/utils/FileUtils.go
index 4c0778a..b9b5739 100644
--- a/agent-common/utils/FileUtils.go
+++ b/agent-common/utils/FileUtils.go
@@ -68,6 +68,29 @@ func AppendContentToFile(content string, targetFile string) bool {
 	return true
 }
 
+func AppendContentWithSplitLineToFile(content string, targetFile string) bool {
+
+	// Open the file for appending; create it if it does not exist.
+	file, err := os.OpenFile(targetFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+	if err != nil {
+		log.ErrorF("[AppendContentWithSplitLineToFile] - Error opening file: %s , error is %s", targetFile, err.Error())
+		return false
+	}
+	defer file.Close() // ensure the file is closed on every return path
+
+	// Write the "---" separator on its own line, then the content; a bare "---" would glue onto the previous content.
+	if _, err := file.WriteString("\n---\n"); err != nil {
+		log.ErrorF("[AppendContentWithSplitLineToFile] - Error writing to file: %s , error is %s", targetFile, err.Error())
+		return false
+	}
+	if _, err := file.WriteString(content); err != nil {
+		log.ErrorF("[AppendContentWithSplitLineToFile] - Error 
writing to file: %s , error is %s", targetFile, err.Error()) + return false + } + + return true +} + // AppendNullToFile 清空一个文件 func AppendNullToFile(targetFile string) bool { diff --git a/agent-common/utils/StringUtils.go b/agent-common/utils/StringUtils.go new file mode 100644 index 0000000..ad85839 --- /dev/null +++ b/agent-common/utils/StringUtils.go @@ -0,0 +1,17 @@ +package utils + +import ( + "math/rand" + "time" +) + +func GenerateRandomString(length int) string { + + rand.Seed(time.Now().UnixNano()) + chars := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890" + b := make([]byte, length) + for i := range b { + b[i] = chars[rand.Intn(len(chars))] + } + return string(b) +} diff --git a/agent-go/bastion_mode_init/amd64/socks5_linux_amd64 b/agent-go/bastion_mode_init/amd64/socks5_linux_amd64 deleted file mode 100644 index 71cb666..0000000 Binary files a/agent-go/bastion_mode_init/amd64/socks5_linux_amd64 and /dev/null differ diff --git a/agent-go/bastion_mode_init/arm64/socks5_linux_arm64 b/agent-go/bastion_mode_init/arm64/socks5_linux_arm64 deleted file mode 100644 index a5acf6b..0000000 Binary files a/agent-go/bastion_mode_init/arm64/socks5_linux_arm64 and /dev/null differ diff --git a/agent-go/bastion_mode_init/bastion_mode.sh b/agent-go/bastion_mode_init/bastion_mode.sh deleted file mode 100644 index 1e63a5c..0000000 --- a/agent-go/bastion_mode_init/bastion_mode.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -# 需要修改的部分 -# 需要修改的部分 - -# Socks5 -install_socks5() { - -} -# MINIO 安装 -install_minio_server() { - -} -## -# RabbitMQ 安装 初始化 diff --git a/agent-operator/CmiiOperator.go b/agent-operator/CmiiOperator.go index d85cd25..beec49a 100644 --- a/agent-operator/CmiiOperator.go +++ b/agent-operator/CmiiOperator.go @@ -217,7 +217,6 @@ func DownloadLoadTagPush(downloadFromOss bool, ossUrlPrefix, ossFileName, localG } // load loadAllGzipImageFromLocalFolder(localGzipFolder) - image.LoadFromFolderPath(localGzipFolder) // tag // push @@ -252,7 +251,7 @@ func DownloadLoadTagPush(downloadFromOss bool, ossUrlPrefix, ossFileName, localG } func loadAllGzipImageFromLocalFolder(localGzipFolder string) { - + image.LoadFromFolderPath(localGzipFolder) } func parseAndDownloadFromOss(ossUrlPrefix, ossFileName, localGzipFolder string) bool { diff --git a/agent-operator/deploy/CmiiAppDeploy.go b/agent-operator/deploy/CmiiAppDeploy.go deleted file mode 100644 index 924c612..0000000 --- a/agent-operator/deploy/CmiiAppDeploy.go +++ /dev/null @@ -1,90 +0,0 @@ -package deploy - -import ( - "bytes" - "fmt" - v1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" - "sigs.k8s.io/yaml" - "text/template" - "wdd.io/agent-common/utils" -) - -type CommonEnvironmentConfig struct { - WebIP string - WebPort string - HarborIP string - HarborPort string -} - -type CmiiBackendDeploymentConfig struct { - Namespace string - AppName string - ImageTag string - TagVersion string - Replicas string - NodePort string - NeedPvcCache bool - CustomJvmOpt string -} - -type CmiiFrontendDeploymentConfig struct { - Namespace string - AppName string - ImageTag string - TagVersion string - Replicas string - ShortName string -} - -func (backend CmiiBackendDeploymentConfig) ParseToApplyConf() *appsv1.DeploymentApplyConfiguration { - - // 解析模板 - - tmpl, err := template.New("cmiiBackendDeploymentTemplate").Parse(cmiiBackendDeploymentTemplate) - if err != nil { - panic(err) - } - - // 应用数据并打印结果 - var result bytes.Buffer - err = tmpl.Execute(&result, backend) - if err != nil { 
- panic(err) - } - - // 创建Deployment对象 - deployment := v1.Deployment{} - err = yaml.Unmarshal(result.Bytes(), &deployment) - if err != nil { - panic(err) - } - - utils.BeautifulPrint(&deployment) - - // service - parse, err := template.New("cmiiBackendServiceTemplate").Parse(cmiiBackendServiceTemplate) - if err != nil { - panic(err) - } - // 应用数据并打印结果 - var resulta bytes.Buffer - err = parse.Execute(&resulta, backend) - if err != nil { - panic(err) - } - - fmt.Println(resulta.String()) - - // 创建Deployment对象 - service := corev1.Service{} - err = yaml.Unmarshal(resulta.Bytes(), &service) - if err != nil { - panic(err) - } - - utils.BeautifulPrint(&service) - - return nil -} diff --git a/agent-operator/deploy/OctopusDeploy.go b/agent-operator/deploy/OctopusDeploy.go new file mode 100644 index 0000000..42d8146 --- /dev/null +++ b/agent-operator/deploy/OctopusDeploy.go @@ -0,0 +1,99 @@ +package deploy + +import ( + "wdd.io/agent-common/logger" + "wdd.io/agent-operator/deploy/a_dashboard" + "wdd.io/agent-operator/deploy/a_nfs" + "wdd.io/agent-operator/deploy/b_middle" + "wdd.io/agent-operator/deploy/c_app" + "wdd.io/agent-operator/deploy/z_dep" +) + +var log = logger.Log + +func OctopusDeploy() { + + // common environment + common := &z_dep.CommonEnvironmentConfig{ + WebIP: "10.100.2.121", + WebPort: "8888", + HarborIP: "10.100.2.121", + HarborPort: "8033", + Namespace: "zjjt", + TagVersion: "5.5.0", + TenantEnv: "", + MinioPublicIP: "10.100.2.116", + MinioInnerIP: "10.100.2.116", + NFSServerIP: "10.100.2.121", + } + + a_dashboard.K8sDashboardDeploy(common) + + a_nfs.NFSDeploy(common) + a_nfs.NFSTestDeploy(common) + + // pvc + b_middle.PVCDeploy(common) + + // middlewares + b_middle.MidMySQlDeploy(common) + b_middle.MidRedisDeploy(common) + b_middle.MidEmqxDeploy(common) + b_middle.MidMongoDeploy(common) + b_middle.MidRabbitMQDeploy(common) + b_middle.MidRabbitMQDeploy(common) + b_middle.MidNacosDeploy(common) + + configMapDeploy(common) + c_app.IngressDeploy(common) + // + backendDeploy(common) + frontendDeploy(common) + c_app.SrsDeploy(common) +} + +func backendDeploy(common *z_dep.CommonEnvironmentConfig) { + backendMap := map[string]string{ + "cmii-admin-data": "5.2.0", + "cmii-admin-gateway": "5.2.0", + "cmii-admin-user": "5.2.0", + } + + for appName, tag := range backendMap { + c_app.DefaultCmiiBackendConfig.AppName = appName + c_app.DefaultCmiiBackendConfig.ImageTag = tag + c_app.DefaultCmiiBackendConfig.Replicas = "1" + c_app.DefaultCmiiBackendConfig.BackendDeploy(common) + } +} + +func frontendDeploy(common *z_dep.CommonEnvironmentConfig) { + frontendMap := map[string]string{ + "cmii-admin-web": "5.2.0", + } + + for appName, tag := range frontendMap { + c_app.DefaultCmiiFrontendConfig.AppName = appName + c_app.DefaultCmiiFrontendConfig.ImageTag = tag + c_app.DefaultCmiiFrontendConfig.Replicas = "1" + c_app.DefaultCmiiFrontendConfig.FrontendDeploy(common) + } +} + +func configMapDeploy(common *z_dep.CommonEnvironmentConfig) { + + for frontendName, shortName := range c_app.FrontendShortNameMaps { + c_app.DefaultCmiiFrontendConfig.AppName = frontendName + c_app.DefaultCmiiFrontendConfig.ShortName = shortName + + value, ok := c_app.FrontendClientIdMaps[frontendName] + if !ok { + log.ErrorF("FrontendClientIdMaps error ! 
not contains %s", frontendName) + continue + } + + c_app.DefaultCmiiFrontendConfig.ClientId = value + + c_app.DefaultCmiiFrontendConfig.ConfigMapDeploy(common) + } +} diff --git a/agent-operator/deploy/OctopusDeploy_test.go b/agent-operator/deploy/OctopusDeploy_test.go new file mode 100644 index 0000000..c322da7 --- /dev/null +++ b/agent-operator/deploy/OctopusDeploy_test.go @@ -0,0 +1,8 @@ +package deploy + +import "testing" + +func TestOctopusDeploy(t *testing.T) { + + OctopusDeploy() +} diff --git a/agent-operator/deploy/a_dashboard/DeployK8sDashboard.go b/agent-operator/deploy/a_dashboard/DeployK8sDashboard.go new file mode 100644 index 0000000..15d19b0 --- /dev/null +++ b/agent-operator/deploy/a_dashboard/DeployK8sDashboard.go @@ -0,0 +1,22 @@ +package a_dashboard + +import ( + "wdd.io/agent-common/logger" + "wdd.io/agent-operator/deploy/z_dep" +) + +var ( + K8sDashboardApplyFilePath = "" + log = logger.Log +) + +func init() { + + K8sDashboardApplyFilePath = z_dep.ApplyFilePrefix + "k8s-dashboard.yaml" + + log.InfoF("K8sDashboardApplyFilePath: %s", K8sDashboardApplyFilePath) +} + +func K8sDashboardDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool { + return commonEnv.ParseCommonEnvToApplyFile(CmiiK8sDashboardTemplate, K8sDashboardApplyFilePath) +} diff --git a/agent-operator/deploy/a_dashboard/TemplateK8SDashboard.go b/agent-operator/deploy/a_dashboard/TemplateK8SDashboard.go new file mode 100644 index 0000000..b7f8856 --- /dev/null +++ b/agent-operator/deploy/a_dashboard/TemplateK8SDashboard.go @@ -0,0 +1,310 @@ +package a_dashboard + +const CmiiK8sDashboardTemplate = ` +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + kubernetes.io/cluster-service: "true" + name: kubernetes-dashboard + namespace: kube-system +spec: + ports: + - port: 443 + targetPort: 8443 + nodePort: 30554 + selector: + k8s-app: kubernetes-dashboard + type: NodePort + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: kube-system +type: Opaque + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-csrf + namespace: kube-system +type: Opaque +data: + csrf: "" + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-key-holder + namespace: kube-system +type: Opaque + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-settings + namespace: kube-system + +--- + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +rules: + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. + - apiGroups: [""] + resources: ["secrets"] + resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] + verbs: ["get", "update", "delete"] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["kubernetes-dashboard-settings"] + verbs: ["get", "update"] + # Allow Dashboard to get metrics. 
+ - apiGroups: [""] + resources: ["services"] + resourceNames: ["heapster", "dashboard-metrics-scraper"] + verbs: ["proxy"] + - apiGroups: [""] + resources: ["services/proxy"] + resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] + verbs: ["get"] + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list", "watch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + containers: + - name: kubernetes-dashboard + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/dashboard:v2.0.1 + ports: + - containerPort: 8443 + protocol: TCP + args: + - --auto-generate-certificates + - --namespace=kube-system + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. 
+ # - --apiserver-host=http://my-address:port + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: kubernetes-dashboard-certs + - name: tmp-volume + emptyDir: {} + serviceAccountName: kubernetes-dashboard + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kube-system +spec: + ports: + - port: 8000 + targetPort: 8000 + selector: + k8s-app: dashboard-metrics-scraper + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: dashboard-metrics-scraper + template: + metadata: + labels: + k8s-app: dashboard-metrics-scraper + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + spec: + containers: + - name: dashboard-metrics-scraper + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/metrics-scraper:v1.0.4 + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTP + path: / + port: 8000 + initialDelaySeconds: 30 + timeoutSeconds: 30 + volumeMounts: + - mountPath: /tmp + name: tmp-volume + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + serviceAccountName: kubernetes-dashboard + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + volumes: + - name: tmp-volume + emptyDir: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: admin-user + namespace: kube-system + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: admin-user +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: admin-user + namespace: kube-system +` diff --git a/agent-operator/deploy/a_nfs/DeployNFS.go b/agent-operator/deploy/a_nfs/DeployNFS.go new file mode 100644 index 0000000..9e88a0d --- /dev/null +++ b/agent-operator/deploy/a_nfs/DeployNFS.go @@ -0,0 +1,33 @@ +package a_nfs + +import ( + "wdd.io/agent-common/logger" + "wdd.io/agent-operator/deploy/z_dep" +) + +var ( + NfsApplyFilePath = "" + NfsTestApplyFilePath = "" + log = logger.Log +) + +type NfsDeployConfig struct { + NfsLocalPath string +} + +func init() { + + NfsApplyFilePath = z_dep.ApplyFilePrefix + "k8s-nfs.yaml" + NfsTestApplyFilePath = z_dep.ApplyFilePrefix + "k8s-nfs-test.yaml" + + log.InfoF("NfsApplyFilePath : %s\n", NfsApplyFilePath) + log.InfoF("NfsTestApplyFilePath : %s\n", NfsTestApplyFilePath) +} + +func NFSDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool { + return commonEnv.ParseCommonEnvToApplyFile(CmiiNfsTemplate, NfsApplyFilePath) +} + +func NFSTestDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool { + return 
commonEnv.ParseCommonEnvToApplyFile(CmiiNFSTestTemplate, NfsTestApplyFilePath)
+}
diff --git a/agent-operator/deploy/a_nfs/TemplateNFS.go b/agent-operator/deploy/a_nfs/TemplateNFS.go
new file mode 100644
index 0000000..ac3431a
--- /dev/null
+++ b/agent-operator/deploy/a_nfs/TemplateNFS.go
@@ -0,0 +1,117 @@
+package a_nfs
+
+const CmiiNfsTemplate = `
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: nfs-client-provisioner
+  # replace with namespace where provisioner is deployed
+  namespace: kube-system # set the namespace for your environment; the same applies below
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: nfs-client-provisioner-runner
+rules:
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["create", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: run-nfs-client-provisioner
+subjects:
+  - kind: ServiceAccount
+    name: nfs-client-provisioner
+    # replace with namespace where provisioner is deployed
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+#  name: nfs-client-provisioner-runner
+  name: cluster-admin
+  apiGroup: rbac.authorization.k8s.io
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: leader-locking-nfs-client-provisioner
+  # replace with namespace where provisioner is deployed
+  namespace: kube-system
+rules:
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: leader-locking-nfs-client-provisioner
+subjects:
+  - kind: ServiceAccount
+    name: nfs-client-provisioner
+    # replace with namespace where provisioner is deployed
+    namespace: kube-system
+roleRef:
+  kind: Role
+  name: leader-locking-nfs-client-provisioner
+  apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: nfs-prod-distribute
+provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment below
+parameters:
+  archiveOnDelete: "false"
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nfs-client-provisioner
+  labels:
+    app: nfs-client-provisioner
+  # replace with namespace where provisioner is deployed
+  namespace: kube-system # keep in sync with the namespace used in the RBAC objects above
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: nfs-client-provisioner
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: nfs-client-provisioner
+    spec:
+      serviceAccountName: nfs-client-provisioner
+      containers:
+        - name: nfs-client-provisioner
+          image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/nfs-subdir-external-provisioner:v4.0.2
+          volumeMounts:
+            - name: nfs-client-root
+              mountPath: /persistentvolumes
+          env:
+            - name: PROVISIONER_NAME
+              value: cmlc-nfs-storage
+            - name: NFS_SERVER
+              value: {{ .NFSServerIP }}
+            - name: NFS_PATH
+              value: /var/lib/docker/nfs_data
+      volumes:
+        - name: nfs-client-root
+          nfs:
+            server: {{ .NFSServerIP }}
+            path: /var/lib/docker/nfs_data
+`
diff --git a/agent-operator/deploy/a_nfs/TemplateNFSTest.go b/agent-operator/deploy/a_nfs/TemplateNFSTest.go
new file mode 100644
index 0000000..4fe0889
--- /dev/null
+++ b/agent-operator/deploy/a_nfs/TemplateNFSTest.go
@@ -0,0 +1,39 @@
+package a_nfs
+
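Aside: `ParseCommonEnvToApplyFile` lives in `z_dep`, which this diff does not touch, so only its call sites are visible here. For readers following along, a minimal sketch of what such a helper plausibly looks like — render the manifest template against the common config and write the result to the apply-file path. Every name below other than the call sites above is an assumption, not part of this patch:

```go
package z_dep

import (
	"os"
	"text/template"
)

// ParseCommonEnvToApplyFile (sketch): parse the manifest template, resolve
// actions such as {{ .HarborIP }} and {{ .NFSServerIP }} against the
// receiver, and write the rendered YAML to applyFilePath.
// The real implementation presumably also logs each failure.
func (c *CommonEnvironmentConfig) ParseCommonEnvToApplyFile(manifestTpl, applyFilePath string) bool {
	tpl, err := template.New("manifest").Parse(manifestTpl)
	if err != nil {
		return false // malformed template
	}
	f, err := os.Create(applyFilePath)
	if err != nil {
		return false
	}
	defer f.Close()
	return tpl.Execute(f, c) == nil
}
```

Each `*Deploy` function above then reduces to one such call per template constant.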
+const CmiiNFSTestTemplate = ` +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: test-claim + annotations: + volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" #与nfs-StorageClass.yaml metadata.name保持一致 +spec: + accessModes: + - ReadWriteOnce + storageClassName: nfs-prod-distribute + resources: + requests: + storage: 1Mi +--- +kind: Pod +apiVersion: v1 +metadata: + name: test-pod +spec: + containers: + - name: test-pod + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/busybox + command: + - "/bin/sh" + args: + - "-c" + - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出 + volumeMounts: + - name: nfs-pvc + mountPath: "/mnt" + restartPolicy: "Never" + volumes: + - name: nfs-pvc + persistentVolumeClaim: + claimName: test-claim #与PVC名称保持一致 +` diff --git a/agent-operator/deploy/b_middle/CmiiEmqxTemplate.go b/agent-operator/deploy/b_middle/CmiiEmqxTemplate.go new file mode 100644 index 0000000..52f9127 --- /dev/null +++ b/agent-operator/deploy/b_middle/CmiiEmqxTemplate.go @@ -0,0 +1,266 @@ +package b_middle + +const CmiiEmqxTemplate = ` +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-emqxs + namespace: {{ .Namespace }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-emqxs-env + namespace: {{ .Namespace }} + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: {{ .TagVersion }} +data: + EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc.cluster.local:443 + EMQX_NAME: helm-emqxs + EMQX_CLUSTER__DISCOVERY: k8s + EMQX_CLUSTER__K8S__APP_NAME: helm-emqxs + EMQX_CLUSTER__K8S__SERVICE_NAME: helm-emqxs-headless + EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns" + EMQX_CLUSTER__K8S__namespace: {{ .Namespace }} + EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local + EMQX_ALLOW_ANONYMOUS: "false" + EMQX_ACL_NOMATCH: "deny" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-emqxs-cm + namespace: {{ .Namespace }} + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: {{ .TagVersion }} +data: + emqx_auth_username.conf: |- + auth.user.1.username = cmlc + auth.user.1.password = odD8#Ve7.B + auth.user.password_hash = sha256 + + acl.conf: |- + {allow, {user, "admin"}, pubsub, ["admin/#"]}. + {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}. + {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}. + {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}. + {allow, all}. + + loaded_plugins: |- + {emqx_auth_username,true}. + {emqx_management, true}. + {emqx_recon, true}. + {emqx_retainer, false}. + {emqx_dashboard, true}. + {emqx_telemetry, true}. + {emqx_rule_engine, true}. + {emqx_bridge_mqtt, false}. 
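+  # NOTE: emqx_auth_username.conf and the loaded_plugins file are EMQX 4.x
+  # configuration mechanisms; double-check they still take effect on the
+  # emqx:5.5.1 image referenced in the StatefulSet below.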
+--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-emqxs + namespace: {{ .Namespace }} + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: {{ .TagVersion }} +spec: + replicas: 1 + serviceName: helm-emqxs-headless + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + template: + metadata: + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: {{ .TagVersion }} + spec: + affinity: {} + serviceAccountName: helm-emqxs + containers: + - name: helm-emqxs + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/emqx:5.5.1 + imagePullPolicy: Always + ports: + - name: mqtt + containerPort: 1883 + - name: mqttssl + containerPort: 8883 + - name: mgmt + containerPort: 8081 + - name: ws + containerPort: 8083 + - name: wss + containerPort: 8084 + - name: dashboard + containerPort: 18083 + - name: ekka + containerPort: 4370 + envFrom: + - configMapRef: + name: helm-emqxs-env + resources: {} + volumeMounts: + - name: emqx-data + mountPath: "/opt/emqx/data/mnesia" + readOnly: false + - name: helm-emqxs-cm + mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf" + subPath: emqx_auth_username.conf + readOnly: false +# - name: helm-emqxs-cm +# mountPath: "/opt/emqx/etc/acl.conf" +# subPath: "acl.conf" +# readOnly: false + - name: helm-emqxs-cm + mountPath: "/opt/emqx/data/loaded_plugins" + subPath: loaded_plugins + readOnly: false + volumes: + - name: emqx-data + persistentVolumeClaim: + claimName: helm-emqxs + - name: helm-emqxs-cm + configMap: + name: helm-emqxs-cm + items: + - key: emqx_auth_username.conf + path: emqx_auth_username.conf + - key: acl.conf + path: acl.conf + - key: loaded_plugins + path: loaded_plugins +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-emqxs + namespace: {{ .Namespace }} +rules: + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - watch + - list +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-emqxs + namespace: {{ .Namespace }} +subjects: + - kind: ServiceAccount + name: helm-emqxs + namespace: {{ .Namespace }} +roleRef: + kind: Role + name: helm-emqxs + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-emqxs + namespace: {{ .Namespace }} + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: {{ .TagVersion }} +spec: + type: NodePort + selector: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + ports: + - port: 1883 + name: mqtt + targetPort: 1883 + nodePort: 31883 + - port: 18083 + name: dashboard + targetPort: 18083 + nodePort: 38085 + - port: 8083 + name: mqtt-websocket + targetPort: 8083 + nodePort: 38083 +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-emqxs-headless + namespace: {{ .Namespace }} + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: {{ .TagVersion }} +spec: + type: ClusterIP + clusterIP: None + selector: + 
cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + ports: + - name: mqtt + port: 1883 + protocol: TCP + targetPort: 1883 + - name: mqttssl + port: 8883 + protocol: TCP + targetPort: 8883 + - name: mgmt + port: 8081 + protocol: TCP + targetPort: 8081 + - name: websocket + port: 8083 + protocol: TCP + targetPort: 8083 + - name: wss + port: 8084 + protocol: TCP + targetPort: 8084 + - name: dashboard + port: 18083 + protocol: TCP + targetPort: 18083 + - name: ekka + port: 4370 + protocol: TCP + targetPort: 4370 +` diff --git a/agent-operator/deploy/b_middle/CmiiMongoTemplate.go b/agent-operator/deploy/b_middle/CmiiMongoTemplate.go new file mode 100644 index 0000000..95761f8 --- /dev/null +++ b/agent-operator/deploy/b_middle/CmiiMongoTemplate.go @@ -0,0 +1,78 @@ +package b_middle + +const CmiiMongoTemplate = ` +apiVersion: v1 +kind: Service +metadata: + name: helm-mongo + namespace: {{ .Namespace }} + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: {{ .TagVersion }} +spec: + type: ClusterIP + selector: + cmii.app: helm-mongo + cmii.type: middleware + ports: + - port: 27017 + name: server-27017 + targetPort: 27017 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-mongo + namespace: {{ .Namespace }} + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: {{ .TagVersion }} +spec: + serviceName: helm-mongo + replicas: 1 + selector: + matchLabels: + cmii.app: helm-mongo + cmii.type: middleware + template: + metadata: + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: {{ .TagVersion }} + annotations: + pod.alpha.kubernetes.io/initialized: "true" + spec: + affinity: {} + containers: + - name: helm-mongo + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/mongo:5.0 + resources: {} + ports: + - containerPort: 27017 + name: mongo27017 + protocol: TCP + env: + - name: MONGO_INITDB_ROOT_USERNAME + value: cmlc + - name: MONGO_INITDB_ROOT_PASSWORD + value: REdPza8#oVlt + volumeMounts: + - name: mongo-data + mountPath: /data/db + readOnly: false + subPath: default/helm-mongo/data/db + volumes: + - name: mongo-data + persistentVolumeClaim: + claimName: helm-mongo +--- +` diff --git a/agent-operator/deploy/b_middle/CmiiMySQLTemplate.go b/agent-operator/deploy/b_middle/CmiiMySQLTemplate.go new file mode 100644 index 0000000..b7d3ceb --- /dev/null +++ b/agent-operator/deploy/b_middle/CmiiMySQLTemplate.go @@ -0,0 +1,411 @@ +package b_middle + +const CmiiMySQLTemplate = ` +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-mysql + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus + annotations: +secrets: + - name: helm-mysql +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-mysql + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus +type: Opaque +data: + mysql-root-password: "UXpmWFFoZDNiUQ==" + mysql-password: "S0F0cm5PckFKNw==" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-mysql + namespace: {{ .Namespace }} + labels: + 
app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: primary +data: + my.cnf: |- + + [mysqld] + port=3306 + basedir=/opt/bitnami/mysql + datadir=/bitnami/mysql/data + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + socket=/opt/bitnami/mysql/tmp/mysql.sock + log-error=/bitnami/mysql/data/error.log + general_log_file = /bitnami/mysql/data/general.log + slow_query_log_file = /bitnami/mysql/data/slow.log + innodb_data_file_path = ibdata1:512M:autoextend + innodb_buffer_pool_size = 512M + innodb_buffer_pool_instances = 2 + innodb_log_file_size = 512M + innodb_log_files_in_group = 4 + innodb_log_files_in_group = 4 + log-bin = /bitnami/mysql/data/mysql-bin + max_binlog_size=1G + transaction_isolation = REPEATABLE-READ + default_storage_engine = innodb + character-set-server = utf8mb4 + collation-server=utf8mb4_bin + binlog_format = ROW + binlog_rows_query_log_events=on + binlog_cache_size=4M + binlog_expire_logs_seconds = 1296000 + max_binlog_cache_size=2G + gtid_mode = on + enforce_gtid_consistency = 1 + sync_binlog = 1 + innodb_flush_log_at_trx_commit = 1 + innodb_flush_method = O_DIRECT + log_slave_updates=1 + relay_log_recovery = 1 + relay-log-purge = 1 + default_time_zone = '+08:00' + lower_case_table_names=1 + log_bin_trust_function_creators=1 + group_concat_max_len=67108864 + innodb_io_capacity = 4000 + innodb_io_capacity_max = 8000 + innodb_flush_sync = 0 + innodb_flush_neighbors = 0 + innodb_write_io_threads = 8 + innodb_read_io_threads = 8 + innodb_purge_threads = 4 + innodb_page_cleaners = 4 + innodb_open_files = 65535 + innodb_max_dirty_pages_pct = 50 + innodb_lru_scan_depth = 4000 + innodb_checksum_algorithm = crc32 + innodb_lock_wait_timeout = 10 + innodb_rollback_on_timeout = 1 + innodb_print_all_deadlocks = 1 + innodb_file_per_table = 1 + innodb_online_alter_log_max_size = 4G + innodb_stats_on_metadata = 0 + innodb_thread_concurrency = 0 + innodb_sync_spin_loops = 100 + innodb_spin_wait_delay = 30 + lock_wait_timeout = 3600 + slow_query_log = 1 + long_query_time = 10 + log_queries_not_using_indexes =1 + log_throttle_queries_not_using_indexes = 60 + min_examined_row_limit = 100 + log_slow_admin_statements = 1 + log_slow_slave_statements = 1 + default_authentication_plugin=mysql_native_password + skip-name-resolve=1 + explicit_defaults_for_timestamp=1 + plugin_dir=/opt/bitnami/mysql/plugin + max_allowed_packet=128M + max_connections = 2000 + max_connect_errors = 1000000 + table_definition_cache=2000 + table_open_cache_instances=64 + tablespace_definition_cache=1024 + thread_cache_size=256 + interactive_timeout = 600 + wait_timeout = 600 + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=32M + bind-address=0.0.0.0 + performance_schema = 1 + performance_schema_instrument = '%memory%=on' + performance_schema_instrument = '%lock%=on' + innodb_monitor_enable=ALL + + [mysql] + no-auto-rehash + + [mysqldump] + quick + max_allowed_packet = 32M + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mysql/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-mysql-init-scripts + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus + 
app.kubernetes.io/component: primary +data: + create_users_grants_core.sql: |- + create user zyly@'%' identified by 'Cmii@451315'; + grant select on *.* to zyly@'%'; + create user zyly_qc@'%' identified by 'Uh)E_owCyb16'; + grant all on *.* to zyly_qc@'%'; + create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8'; + grant all on *.* to k8s_admin@'%'; + create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH'; + grant all on *.* to audit_dba@'%'; + create user db_backup@'%' identified by 'RU5Pu(4FGdT9'; + GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%'; + create user monitor@'%' identified by 'PL3#nGtrWbf-'; + grant REPLICATION CLIENT on *.* to monitor@'%'; + flush privileges; +--- +kind: Service +apiVersion: v1 +metadata: + name: cmii-mysql + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/component: primary + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: {{ .Namespace }} + cmii.app: mysql + cmii.type: middleware + octopus.control: mysql-db-wdd +spec: + ports: + - name: mysql + protocol: TCP + port: 13306 + targetPort: mysql + selector: + app.kubernetes.io/component: primary + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: {{ .Namespace }} + cmii.app: mysql + cmii.type: middleware + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mysql-headless + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + annotations: +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mysql + port: 3306 + targetPort: mysql + selector: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: {{ .Namespace }} + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mysql + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + annotations: +spec: + type: NodePort + ports: + - name: mysql + port: 3306 + protocol: TCP + targetPort: mysql + nodePort: 33306 + selector: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: {{ .Namespace }} + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-mysql + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: {{ .Namespace }} + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + serviceName: helm-mysql + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + 
app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + spec: + serviceAccountName: helm-mysql + affinity: {} + nodeSelector: + mysql-deploy: "true" + securityContext: + fsGroup: 1001 + initContainers: + - name: change-volume-permissions + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/bitnami-shell:11-debian-11-r136 + imagePullPolicy: "Always" + command: + - /bin/bash + - -ec + - | + chown -R 1001:1001 /bitnami/mysql + securityContext: + runAsUser: 0 + volumeMounts: + - name: mysql-data + mountPath: /bitnami/mysql + containers: + - name: mysql + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/mysql:8.1.0-debian-11-r42 + imagePullPolicy: "IfNotPresent" + securityContext: + runAsUser: 1001 + env: + - name: BITNAMI_DEBUG + value: "true" + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: helm-mysql + key: mysql-root-password + - name: MYSQL_DATABASE + value: "cmii" + ports: + - name: mysql + containerPort: 3306 + livenessProbe: + failureThreshold: 5 + initialDelaySeconds: 120 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + readinessProbe: + failureThreshold: 5 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + resources: + limits: {} + requests: {} + volumeMounts: + - name: mysql-data + mountPath: /bitnami/mysql + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + - name: config + mountPath: /opt/bitnami/mysql/conf/my.cnf + subPath: my.cnf + volumes: + - name: config + configMap: + name: helm-mysql + - name: custom-init-scripts + configMap: + name: helm-mysql-init-scripts + - name: mysql-data + hostPath: + path: /var/lib/docker/mysql-pv +` diff --git a/agent-operator/deploy/b_middle/CmiiNacosTemplate.go b/agent-operator/deploy/b_middle/CmiiNacosTemplate.go new file mode 100644 index 0000000..96b2c51 --- /dev/null +++ b/agent-operator/deploy/b_middle/CmiiNacosTemplate.go @@ -0,0 +1,129 @@ +package b_middle + +const CmiiNacosTemplate = ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-nacos-cm + namespace: {{ .Namespace }} + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: {{ .TagVersion }} +data: + mysql.db.name: "cmii_nacos_config" + mysql.db.host: "helm-mysql" + mysql.port: "3306" + mysql.user: "k8s_admin" + mysql.password: "fP#UaH6qQ3)8" +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-nacos + namespace: {{ .Namespace }} + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: {{ 
.TagVersion }} +spec: + type: NodePort + selector: + cmii.app: helm-nacos + cmii.type: middleware + ports: + - port: 8848 + name: server + targetPort: 8848 + nodePort: 38989 + - port: 9848 + name: server12 + targetPort: 9848 + nodePort: 38912 + - port: 9849 + name: server23 + targetPort: 9849 + nodePort: 38923 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-nacos + namespace: {{ .Namespace }} + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: {{ .TagVersion }} +spec: + serviceName: helm-nacos + replicas: 1 + selector: + matchLabels: + cmii.app: helm-nacos + cmii.type: middleware + template: + metadata: + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/version: {{ .TagVersion }} + annotations: + pod.alpha.kubernetes.io/initialized: "true" + spec: + affinity: {} + containers: + - name: nacos-server + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/nacos-server:v2.1.2 + ports: + - containerPort: 8848 + name: dashboard + env: + - name: NACOS_AUTH_ENABLE + value: "false" + - name: NACOS_REPLICAS + value: "1" + - name: MYSQL_SERVICE_DB_NAME + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.db.name + - name: MYSQL_SERVICE_PORT + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.port + - name: MYSQL_SERVICE_USER + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.user + - name: MYSQL_SERVICE_PASSWORD + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.password + - name: MYSQL_SERVICE_HOST + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.db.host + - name: NACOS_SERVER_PORT + value: "8848" + - name: NACOS_APPLICATION_PORT + value: "8848" + - name: PREFER_HOST_MODE + value: "hostname" + - name: MODE + value: standalone + - name: SPRING_DATASOURCE_PLATFORM + value: mysql +--- +` diff --git a/agent-operator/deploy/b_middle/CmiiRabbitMQTemplate.go b/agent-operator/deploy/b_middle/CmiiRabbitMQTemplate.go new file mode 100644 index 0000000..e103fb5 --- /dev/null +++ b/agent-operator/deploy/b_middle/CmiiRabbitMQTemplate.go @@ -0,0 +1,330 @@ +package b_middle + +const CmiiRabbitMQTemplate = ` +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-rabbitmq + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: rabbitmq +automountServiceAccountToken: true +secrets: + - name: helm-rabbitmq +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-rabbitmq + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: rabbitmq +type: Opaque +data: + rabbitmq-password: "blljUk45MXIuX2hq" + rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-rabbitmq-config + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: rabbitmq +data: + rabbitmq.conf: |- + ## Username and password + ## + default_user = admin + default_pass = nYcRN91r._hj + ## Clustering + ## + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + 
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + # queue master locator + queue_master_locator = min-masters + # enable guest user + loopback_users.guest = false + #default_vhost = default-vhost + #disk_free_limit.absolute = 50MB + #load_definitions = /app/load_definition.json +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-rabbitmq-endpoint-reader + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: rabbitmq +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-rabbitmq-endpoint-reader + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: rabbitmq +subjects: + - kind: ServiceAccount + name: helm-rabbitmq +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: helm-rabbitmq-endpoint-reader +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-rabbitmq-headless + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: rabbitmq +spec: + clusterIP: None + ports: + - name: epmd + port: 4369 + targetPort: epmd + - name: amqp + port: 5672 + targetPort: amqp + - name: dist + port: 25672 + targetPort: dist + - name: dashboard + port: 15672 + targetPort: stats + selector: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: {{ .Namespace }} + publishNotReadyAddresses: true +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-rabbitmq + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: rabbitmq +spec: + type: NodePort + ports: + - name: amqp + port: 5672 + targetPort: amqp + nodePort: 35672 + - name: dashboard + port: 15672 + targetPort: dashboard + nodePort: 35675 + selector: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: {{ .Namespace }} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-rabbitmq + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: rabbitmq +spec: + serviceName: helm-rabbitmq-headless + podManagementPolicy: OrderedReady + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: {{ .Namespace }} + template: + metadata: + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: rabbitmq + annotations: + checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 + checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f + spec: + + serviceAccountName: helm-rabbitmq + affinity: {} + securityContext: + fsGroup: 5001 + runAsUser: 5001 
+ terminationGracePeriodSeconds: 120 + initContainers: + - name: volume-permissions + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/bitnami-shell:10-debian-10-r140 + imagePullPolicy: "Always" + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "/bitnami/rabbitmq/mnesia" + chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" + securityContext: + runAsUser: 0 + resources: + limits: {} + requests: {} + volumeMounts: + - name: data + mountPath: /bitnami/rabbitmq/mnesia + containers: + - name: rabbitmq + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/rabbitmq:3.9.12-debian-10-r3 + imagePullPolicy: "Always" + env: + - name: BITNAMI_DEBUG + value: "false" + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: "helm-rabbitmq-headless" + - name: K8S_ADDRESS_TYPE + value: hostname + - name: RABBITMQ_FORCE_BOOT + value: "no" + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" + - name: RABBITMQ_MNESIA_DIR + value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: "no" + - name: RABBITMQ_LOGS + value: "-" + - name: RABBITMQ_ULIMIT_NOFILES + value: "65536" + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: helm-rabbitmq + key: rabbitmq-erlang-cookie + - name: RABBITMQ_LOAD_DEFINITIONS + value: "no" + - name: RABBITMQ_SECURE_PASSWORD + value: "yes" + - name: RABBITMQ_USERNAME + value: "admin" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: helm-rabbitmq + key: rabbitmq-password + - name: RABBITMQ_PLUGINS + value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" + ports: + - name: amqp + containerPort: 5672 + - name: dist + containerPort: 25672 + - name: dashboard + containerPort: 15672 + - name: epmd + containerPort: 4369 + livenessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q ping + initialDelaySeconds: 120 + periodSeconds: 30 + timeoutSeconds: 20 + successThreshold: 1 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + initialDelaySeconds: 10 + periodSeconds: 30 + timeoutSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" + else + rabbitmqctl stop_app + fi + resources: + limits: {} + requests: {} + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: data + mountPath: /bitnami/rabbitmq/mnesia + volumes: + - name: configuration + configMap: + name: helm-rabbitmq-config + items: + - key: rabbitmq.conf + path: rabbitmq.conf + - name: data + persistentVolumeClaim: + claimName: helm-rabbitmq +` diff --git a/agent-operator/deploy/b_middle/CmiiRedisTemplate.go b/agent-operator/deploy/b_middle/CmiiRedisTemplate.go new file mode 100644 index 0000000..c76152a --- /dev/null +++ b/agent-operator/deploy/b_middle/CmiiRedisTemplate.go @@ -0,0 +1,585 @@ +package b_middle + 
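Because these manifests are plain `text/template` strings, a small test can catch two regressions early: template fields the config struct no longer supplies (`Execute` fails on an unknown struct field) and rendered output that is no longer valid YAML. A sketch under assumptions — the local `fakeEnv` struct is illustrative only, and `sigs.k8s.io/yaml` (already imported by the deleted CmiiAppDeploy.go) is taken to be on the module path; it would live in a `_test.go` file next to these templates:

```go
package b_middle

import (
	"bytes"
	"strings"
	"testing"
	"text/template"

	"sigs.k8s.io/yaml"
)

// fakeEnv mirrors just the fields these two templates reference.
type fakeEnv struct {
	Namespace, TagVersion, HarborIP, HarborPort string
}

func TestTemplatesRenderToValidYAML(t *testing.T) {
	env := fakeEnv{Namespace: "zjjt", TagVersion: "5.5.0", HarborIP: "10.100.2.121", HarborPort: "8033"}
	for name, tplText := range map[string]string{
		"emqx":  CmiiEmqxTemplate,
		"mysql": CmiiMySQLTemplate,
	} {
		tpl, err := template.New(name).Parse(tplText)
		if err != nil {
			t.Fatalf("%s: parse: %v", name, err)
		}
		var buf bytes.Buffer
		if err := tpl.Execute(&buf, env); err != nil {
			t.Fatalf("%s: execute: %v", name, err) // e.g. a field missing from fakeEnv
		}
		// Check each document between "---" separators independently.
		for i, doc := range strings.Split(buf.String(), "\n---\n") {
			var obj map[string]interface{}
			if err := yaml.Unmarshal([]byte(doc), &obj); err != nil {
				t.Errorf("%s: document %d is not valid YAML: %v", name, i, err)
			}
		}
	}
}
```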
+const CmiiRedisTemplate = ` +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: helm-redis + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-redis + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus +type: Opaque +data: + redis-password: "TWNhY2hlQDQ1MjI=" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-configuration + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus +data: + redis.conf: |- + # User-supplied common configuration: + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + # End of common configuration + master.conf: |- + dir /data + # User-supplied master configuration: + rename-command FLUSHDB "" + rename-command FLUSHALL "" + # End of master configuration + replica.conf: |- + dir /data + slave-read-only yes + # User-supplied replica configuration: + rename-command FLUSHDB "" + rename-command FLUSHALL "" + # End of replica configuration +--- +# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-health + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus +data: + ping_readiness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in 
memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status +--- +# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-scripts + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus +data: + start-master.sh: | + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + exec redis-server "${ARGS[@]}" + start-replica.sh: | + #!/bin/bash + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo 26379 + ;; + "REDIS") + echo 6379 + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + echo "${hostname}.${HEADLESS_SERVICE}" + } + + REDISPORT=$(get_port "$HOSTNAME" "REDIS") + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + + echo "" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + exec redis-server "${ARGS[@]}" +--- +# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: helm-redis-headless + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: {{ .Namespace }} +--- +# Source: outside-deploy/charts/redis-db/templates/master/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: helm-redis-master + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +spec: + type: ClusterIP + + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + nodePort: null + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: {{ .Namespace }} + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +--- +# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: helm-redis-replicas + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica +spec: + type: ClusterIP + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + nodePort: null + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/component: replica +--- +# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-redis-master + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: {{ .Namespace }} + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master + serviceName: helm-redis-headless + updateStrategy: + rollingUpdate: {} + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + 
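+        # The checksum/* annotations just below fingerprint the rendered ConfigMaps and
+        # Secret, so any configuration change alters the pod template and triggers a
+        # rolling restart of the StatefulSet.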
app.kubernetes.io/component: master + annotations: + checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 + checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 + checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 + checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d + spec: + affinity: {} + securityContext: + fsGroup: 1001 + serviceAccountName: helm-redis + + terminationGracePeriodSeconds: 30 + containers: + - name: redis + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/redis:6.2.6-debian-10-r0 + imagePullPolicy: "Always" + securityContext: + runAsUser: 1001 + command: + - /bin/bash + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: REDIS_REPLICATION_MODE + value: master + - name: ALLOW_EMPTY_PASSWORD + value: "no" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_TLS_ENABLED + value: "no" + - name: REDIS_PORT + value: "6379" + ports: + - name: redis + containerPort: 6379 + livenessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + # One second longer than command timeout should prevent generation of zombie processes. + timeoutSeconds: 6 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh 5 + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 2 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh 1 + resources: + limits: + cpu: "2" + memory: 8Gi + requests: + cpu: "100m" + memory: 1Gi + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + - name: redis-data + mountPath: /data + subPath: + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + - name: tmp + mountPath: /tmp + volumes: + - name: start-scripts + configMap: + name: helm-redis-scripts + defaultMode: 0755 + - name: health + configMap: + name: helm-redis-health + defaultMode: 0755 + - name: config + configMap: + name: helm-redis-configuration + - name: redis-tmp-conf + emptyDir: {} + - name: tmp + emptyDir: {} + - name: redis-data + emptyDir: {} +--- +# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-redis-replicas + namespace: {{ .Namespace }} + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/component: replica + serviceName: helm-redis-headless + updateStrategy: + rollingUpdate: {} + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: {{ .Namespace }} + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica + annotations: + checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 + checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 + checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 + 
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d + spec: + securityContext: + fsGroup: 1001 + serviceAccountName: helm-redis + terminationGracePeriodSeconds: 30 + containers: + - name: redis + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/redis:6.2.6-debian-10-r0 + imagePullPolicy: "Always" + securityContext: + runAsUser: 1001 + command: + - /bin/bash + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-replica.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: helm-redis-master-0.helm-redis-headless.{{ .Namespace }}.svc.cluster.local + - name: REDIS_MASTER_PORT_NUMBER + value: "6379" + - name: ALLOW_EMPTY_PASSWORD + value: "no" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_TLS_ENABLED + value: "no" + - name: REDIS_PORT + value: "6379" + ports: + - name: redis + containerPort: 6379 + livenessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 6 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh 5 + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 2 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh 1 + resources: + limits: + cpu: "2" + memory: 8Gi + requests: + cpu: "100m" + memory: 1Gi + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + - name: redis-data + mountPath: /data + subPath: + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + volumes: + - name: start-scripts + configMap: + name: helm-redis-scripts + defaultMode: 0755 + - name: health + configMap: + name: helm-redis-health + defaultMode: 0755 + - name: config + configMap: + name: helm-redis-configuration + - name: redis-tmp-conf + emptyDir: {} + - name: redis-data + emptyDir: {} + +` diff --git a/agent-operator/deploy/b_middle/DeployMiddleware.go b/agent-operator/deploy/b_middle/DeployMiddleware.go new file mode 100644 index 0000000..b9d39ec --- /dev/null +++ b/agent-operator/deploy/b_middle/DeployMiddleware.go @@ -0,0 +1,62 @@ +package b_middle + +import ( + "wdd.io/agent-common/logger" + "wdd.io/agent-operator/deploy/z_dep" +) + +var ( + EmqxApplyFilePath = "" + MongoApplyFilePath = "" + RabbitMQApplyFilePath = "" + RedisApplyFilePath = "" + MySQLApplyFilePath = "" + NacosApplyFilePath = "" + PVCApplyFilePath = "" + log = logger.Log +) + +func init() { + + EmqxApplyFilePath = z_dep.ApplyFilePrefix + "k8s-emqx.yaml" + MongoApplyFilePath = z_dep.ApplyFilePrefix + "k8s-mongo.yaml" + RabbitMQApplyFilePath = z_dep.ApplyFilePrefix + "k8s-rabbitmq.yaml" + RedisApplyFilePath = z_dep.ApplyFilePrefix + "k8s-redis.yaml" + MySQLApplyFilePath = z_dep.ApplyFilePrefix + "k8s-mysql.yaml" + NacosApplyFilePath = z_dep.ApplyFilePrefix + "k8s-nacos.yaml" + PVCApplyFilePath = z_dep.ApplyFilePrefix + "k8s-pvc.yaml" + + log.DebugF("EmqxApplyFilePath: %s", EmqxApplyFilePath) + log.DebugF("MongoApplyFilePath: %s", MongoApplyFilePath) + log.DebugF("RabbitMQApplyFilePath: %s", RabbitMQApplyFilePath) + log.DebugF("RedisApplyFilePath: %s", RedisApplyFilePath) + log.DebugF("MySQLApplyFilePath: %s", MySQLApplyFilePath) + 
log.DebugF("NacosApplyFilePath: %s", NacosApplyFilePath) + log.DebugF("PVCApplyFilePath: %s", PVCApplyFilePath) +} + +func MidEmqxDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool { + return commonEnv.ParseCommonEnvToApplyFile(CmiiEmqxTemplate, EmqxApplyFilePath) +} + +func MidMongoDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool { + return commonEnv.ParseCommonEnvToApplyFile(CmiiMongoTemplate, MongoApplyFilePath) +} +func MidRabbitMQDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool { + return commonEnv.ParseCommonEnvToApplyFile(CmiiRabbitMQTemplate, RabbitMQApplyFilePath) +} +func MidRedisDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool { + return commonEnv.ParseCommonEnvToApplyFile(CmiiRedisTemplate, RedisApplyFilePath) +} + +func MidMySQlDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool { + return commonEnv.ParseCommonEnvToApplyFile(CmiiMySQLTemplate, MySQLApplyFilePath) +} + +func MidNacosDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool { + return commonEnv.ParseCommonEnvToApplyFile(CmiiNacosTemplate, NacosApplyFilePath) +} + +func PVCDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool { + return commonEnv.ParseCommonEnvToApplyFile(CmiiPVCTemplate, PVCApplyFilePath) +} diff --git a/agent-operator/deploy/b_middle/TemplateCmiiPVC.go b/agent-operator/deploy/b_middle/TemplateCmiiPVC.go new file mode 100644 index 0000000..73544c7 --- /dev/null +++ b/agent-operator/deploy/b_middle/TemplateCmiiPVC.go @@ -0,0 +1,79 @@ +package b_middle + +const CmiiPVCTemplate = ` +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: nfs-backend-log-pvc + namespace: {{ .Namespace }} + labels: + cmii.type: middleware-base + cmii.app: nfs-backend-log-pvc + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: {{ .TagVersion }} +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 100Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-emqxs + namespace: {{ .Namespace }} + labels: + cmii.type: middleware-base + cmii.app: helm-emqxs + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: {{ .TagVersion }} +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 20Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-mongo + namespace: {{ .Namespace }} + labels: + cmii.type: middleware-base + cmii.app: helm-mongo + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: {{ .TagVersion }} +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 30Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-rabbitmq + namespace: {{ .Namespace }} + labels: + cmii.type: middleware-base + cmii.app: helm-rabbitmq + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: {{ .TagVersion }} +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 20Gi +` diff --git a/agent-operator/deploy/c_app/DeployCmiiApp.go b/agent-operator/deploy/c_app/DeployCmiiApp.go new file mode 100644 index 0000000..12959f4 --- /dev/null +++ b/agent-operator/deploy/c_app/DeployCmiiApp.go @@ -0,0 +1,138 @@ +package c_app + +import ( + "github.com/go-playground/validator/v10" + "wdd.io/agent-common/logger" + 
"wdd.io/agent-operator/deploy/z_dep" +) + +var log = logger.Log + +type CmiiBackendConfig struct { + z_dep.CommonEnvironmentConfig + AppName string `json:"app_name,omitempty" validate:"required"` + ImageTag string `json:"image_tag,omitempty" validate:"required"` + Replicas string `json:"replicas,omitempty" validate:"required" default:"1"` + NodePort string `json:"node_port,omitempty"` + NeedPvcCache bool `json:"need_pvc_cache,omitempty"` + CustomJvmOpt string `json:"custom_jvm_opt,omitempty"` +} + +type CmiiFrontendConfig struct { + z_dep.CommonEnvironmentConfig `json:"z___dep_._common_environment_config"` + AppName string `json:"app_name,omitempty" validate:"required"` + ImageTag string `json:"image_tag,omitempty" validate:"required"` + Replicas string `json:"replicas,omitempty" validate:"required" default:"1"` + ShortName string `json:"short_name,omitempty"` + ClientId string +} + +var ( + DefaultCmiiBackendConfig = &CmiiBackendConfig{} + DefaultCmiiFrontendConfig = &CmiiFrontendConfig{} + BackendApplyFilePath = "" + FrontendApplyFilePath = "" + SRSApplyFilePath = "" + IngresApplyFilePath = "" + ConfigMapApplyFilePath = "" +) + +func init() { + BackendApplyFilePath = z_dep.ApplyFilePrefix + "k8s-backend.yaml" + FrontendApplyFilePath = z_dep.ApplyFilePrefix + "k8s-frontend.yaml" + SRSApplyFilePath = z_dep.ApplyFilePrefix + "k8s-srs.yaml" + IngresApplyFilePath = z_dep.ApplyFilePrefix + "k8s-ingress.yaml" + ConfigMapApplyFilePath = z_dep.ApplyFilePrefix + "k8s-configmap.yaml" + + log.DebugF("backend apply file path: %s\n", BackendApplyFilePath) + log.DebugF("frontend apply file path: %s\n", FrontendApplyFilePath) + log.DebugF("srs apply file path: %s\n", SRSApplyFilePath) + log.DebugF("ingress apply file path: %s\n", IngresApplyFilePath) + log.DebugF("config map apply file path: %s\n", ConfigMapApplyFilePath) +} + +func (backend *CmiiBackendConfig) BackendDeploy(common *z_dep.CommonEnvironmentConfig) bool { + + // copy + z_dep.CopySameFields(common, backend) + + validate := validator.New() + err := validate.Struct(backend) + if err != nil { + log.ErrorF("backend config validate error: %v\n", err) + return false + } + + if !z_dep.ParseEnvToApplyFile(backend, CmiiBackendDeploymentTemplate, BackendApplyFilePath) { + return false + } + if !z_dep.ParseEnvToApplyFile(backend, CmiiBackendServiceTemplate, BackendApplyFilePath) { + return false + } + + // pvc + if backend.NeedPvcCache { + if !z_dep.ParseEnvToApplyFile(backend, CmiiBackendPVCTemplate, BackendApplyFilePath) { + return false + } + } + + return true +} + +func (frontend *CmiiFrontendConfig) FrontendDeploy(common *z_dep.CommonEnvironmentConfig) bool { + + // copy + z_dep.CopySameFields(common, frontend) + + validate := validator.New() + err := validate.Struct(frontend) + if err != nil { + log.ErrorF("backend config validate error: %v\n", err) + return false + } + + if !z_dep.ParseEnvToApplyFile(frontend, CmiiFrontendDeploymentTemplate, FrontendApplyFilePath) { + return false + } + if !z_dep.ParseEnvToApplyFile(frontend, CmiiFrontendServiceTemplate, FrontendApplyFilePath) { + return false + } + + return true +} + +func SrsDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool { + + return commonEnv.ParseCommonEnvToApplyFile(CmiiSrsTemplate, SRSApplyFilePath) +} + +func IngressDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool { + if !commonEnv.ParseCommonEnvToApplyFile(CmiiFrontendIngressTemplate, IngresApplyFilePath) { + return false + } + + if !commonEnv.ParseCommonEnvToApplyFile(CmiiBackendIngressTemplate, IngresApplyFilePath) { + 
return false + } + + if !commonEnv.ParseCommonEnvToApplyFile(CmiiGatewayIngressTemplate, IngresApplyFilePath) { + return false + } + + return true +} + +func (frontend *CmiiFrontendConfig) ConfigMapDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool { + + // copy + z_dep.CopySameFields(commonEnv, frontend) + + // manual validate + if frontend.ShortName == "" || frontend.ClientId == "" { + log.ErrorF("short name or client id is empty !") + return false + } + + return z_dep.ParseEnvToApplyFile(frontend, CmiiFrontendConfigMapTemplate, ConfigMapApplyFilePath) +} diff --git a/agent-operator/deploy/CmiiAppDeploy_test.go b/agent-operator/deploy/c_app/DeployCmiiApp_test.go similarity index 62% rename from agent-operator/deploy/CmiiAppDeploy_test.go rename to agent-operator/deploy/c_app/DeployCmiiApp_test.go index 4c45381..c8be83d 100644 --- a/agent-operator/deploy/CmiiAppDeploy_test.go +++ b/agent-operator/deploy/c_app/DeployCmiiApp_test.go @@ -1,19 +1,18 @@ -package deploy +package c_app -import "testing" +import ( + "testing" +) func TestCmiiBackendDeploymentConfig_ParseToApplyConf(t *testing.T) { - deploymentConfig := CmiiBackendDeploymentConfig{ + deploymentConfig := CmiiBackendConfig{ Namespace: "uavcloud-dev", AppName: "cmii-uav-gateway", ImageTag: "5.2.0-123", - TagVersion: "5.2.0", Replicas: "2", NodePort: "31213", NeedPvcCache: true, } - deploymentConfig.ParseToApplyConf() - } diff --git a/agent-operator/deploy/c_app/FrontendConfigMap.go b/agent-operator/deploy/c_app/FrontendConfigMap.go new file mode 100644 index 0000000..5882fc8 --- /dev/null +++ b/agent-operator/deploy/c_app/FrontendConfigMap.go @@ -0,0 +1,53 @@ +package c_app + +var FrontendShortNameMaps = map[string]string{ + "cmii-suav-platform-supervision": "supervision", + "cmii-suav-platform-supervisionh5": "supervisionh5", + "cmii-uav-platform": "platform", + "cmii-uav-platform-ai-brain": "ai-brain", + "cmii-uav-platform-armypeople": "armypeople", + "cmii-uav-platform-base": "base", + "cmii-uav-platform-cms-portal": "cmsportal", + "cmii-uav-platform-detection": "detection", + "cmii-uav-platform-emergency-rescue": "emergency", + "cmii-uav-platform-logistics": "logistics", + "cmii-uav-platform-media": "media", + "cmii-uav-platform-multiterminal": "multiterminal", + "cmii-uav-platform-mws": "mws", + "cmii-uav-platform-oms": "oms", + "cmii-uav-platform-open": "open", + "cmii-uav-platform-security": "security", + "cmii-uav-platform-securityh5": "securityh5", + "cmii-uav-platform-seniclive": "seniclive", + "cmii-uav-platform-share": "share", + "cmii-uav-platform-splice": "splice", + "cmii-uav-platform-traffic": "traffic", +} + +var FrontendClientIdMaps = map[string]string{ + "cmii-suav-platform-supervision": "APP_qqSu82THfexI8PLM", + "cmii-suav-platform-supervisionh5": "APP_qqSu82THfexI8PLM", + "cmii-uav-platform": "empty", + "cmii-uav-platform-ai-brain": "APP_rafnuCAmBESIVYMH", + "cmii-uav-platform-armypeople": "APP_UIegse6Lfou9pO1U", + "cmii-uav-platform-base": "APP_9LY41OaKSqk2btY0", + "cmii-uav-platform-cms-portal": "empty", + "cmii-uav-platform-detection": "APP_FDHW2VLVDWPnnOCy", + "cmii-uav-platform-emergency-rescue": "APP_aGsTAY1uMZrpKdfk", + "cmii-uav-platform-logistics": "APP_PvdfRRRBPL8xbIwl", + "cmii-uav-platform-media": "APP_4AU8lbifESQO4FD6", + "cmii-uav-platform-multiterminal": "APP_PvdfRRRBPL8xbIwl", + "cmii-uav-platform-mws": "APP_uKniXPELlRERBBwK", + "cmii-uav-platform-oms": "empty", + "cmii-uav-platform-open": "empty", + "cmii-uav-platform-qingdao": "empty", + "cmii-uav-platform-qinghaitourism": "empty", + 
"cmii-uav-platform-security": "APP_JUSEMc7afyWXxvE7", + "cmii-uav-platform-securityh5": "APP_N3ImO0Ubfu9peRHD", + "cmii-uav-platform-seniclive": "empty", + "cmii-uav-platform-share": "APP_4lVSVI0ZGxTssir8", + "cmii-uav-platform-splice": "APP_zE0M3sTRXrCIJS8Y", + "cmii-uav-platform-threedsimulation": "empty", + "cmii-uav-platform-visualization": "empty", + "cmii-uav-platform-traffic": "APP_Jc8i2wOQ1t73QEJS", +} diff --git a/agent-operator/deploy/CmiiAppDeployTemplate.go b/agent-operator/deploy/c_app/TemplateCmiiBackend.go similarity index 63% rename from agent-operator/deploy/CmiiAppDeployTemplate.go rename to agent-operator/deploy/c_app/TemplateCmiiBackend.go index 57f1ff1..a64e453 100644 --- a/agent-operator/deploy/CmiiAppDeployTemplate.go +++ b/agent-operator/deploy/c_app/TemplateCmiiBackend.go @@ -1,6 +1,6 @@ -package deploy +package c_app -const cmiiBackendDeploymentTemplate = ` +const CmiiBackendDeploymentTemplate = ` apiVersion: apps/v1 kind: Deployment metadata: @@ -10,7 +10,7 @@ metadata: cmii.type: backend cmii.app: {{ .AppName }} octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus/control + app.kubernetes.io/managed-by: octopus app.kubernetes.io/app-version: {{ .TagVersion }} spec: replicas: {{ .Replicas }} @@ -40,7 +40,7 @@ spec: - name: harborsecret containers: - name: {{ .AppName }} - image: "harbor.cdcyy.com.cn/cmii/{{ .AppName }}:{{ .ImageTag }}" + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/{{ .AppName }}:{{ .ImageTag }} imagePullPolicy: Always env: - name: K8S_NAMESPACE @@ -72,9 +72,9 @@ spec: containerPort: 8080 protocol: TCP resources: - limits: + limits: . memory: 2Gi - cpu: 2 + cpu: "2" requests: memory: 1Gi cpu: 200m @@ -128,9 +128,9 @@ spec: persistentVolumeClaim: claimName: {{ .AppName }}-cache {{- end }} - ` +` -const cmiiBackendServiceTemplate = ` +const CmiiBackendServiceTemplate = ` apiVersion: v1 kind: Service metadata: @@ -140,7 +140,7 @@ metadata: cmii.type: backend cmii.app: {{ .AppName }} octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus/control + app.kubernetes.io/managed-by: octopus app.kubernetes.io/app-version: {{ .TagVersion }} spec: {{- if .NodePort }} @@ -159,8 +159,9 @@ spec: {{- if .NodePort }} nodePort: {{ .NodePort }} {{- end }} - ` -const cmiiBackendPVCTemplate = ` +` + +const CmiiBackendPVCTemplate = ` apiVersion: v1 kind: PersistentVolumeClaim metadata: @@ -170,7 +171,7 @@ metadata: cmii.type: backend cmii.app: {{ .AppName }} octopus/control: backend-app-1.0.0 - app.kubernetes.io/managed-by: octopus/control + app.kubernetes.io/managed-by: octopus app.kubernetes.io/app-version: {{ .TagVersion }} spec: storageClassName: nfs-prod-distribute @@ -180,84 +181,4 @@ spec: resources: requests: storage: 15Gi - ` - -const cmiiFrontendDeploymentTemplate = ` -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .AppName }} - namespace: {{ .Namespace }} - labels: - cmii.type: frontend - cmii.app: {{ .AppName }} - octopus/control: frontend-app-1.0.0 - app.kubernetes.io/managed-by: octopus/control - app.kubernetes.io/app-version: {{ .TagVersion }} -spec: - replicas: {{ .Replicas }} - strategy: - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - cmii.type: frontend - cmii.app: {{ .AppName }} - template: - metadata: - labels: - cmii.type: frontend - cmii.app: {{ .AppName }} - spec: - imagePullSecrets: - - name: harborsecret - containers: - - name: {{ .AppName }} - image: "harbor.cdcyy.com.cn/cmii/{{ .AppName }}:{{ .ImageTag }}" - imagePullPolicy: Always - env: - - name: K8S_NAMESPACE - 
value: {{ .Namespace }} - - name: APPLICATION_NAME - value: {{ .AppName }} - ports: - - name: platform-9528 - containerPort: 9528 - protocol: TCP - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi - volumeMounts: - - name: nginx-conf - mountPath: /usr/local/nginx/conf/nginx.conf - subPath: nginx.conf - - name: default-nginx-conf - mountPath: /etc/nginx/conf.d/default.conf - subPath: default.conf - - name: tenant-prefix - subPath: ingress-config.js - mountPath: /home/cmii-platform/dist/ingress-config.js - volumes: - - name: nginx-conf - configMap: - name: nginx-cm - items: - - key: nginx.conf - path: nginx.conf - - name: default-nginx-conf - configMap: - name: default-nginx-cm - items: - - key: default.conf - path: default.conf - - name: tenant-prefix - configMap: - name: tenant-prefix-{{ .ShortName }} - items: - - key: ingress-config.js - path: ingress-config.js ` diff --git a/agent-operator/deploy/c_app/TemplateCmiiFrontend.go b/agent-operator/deploy/c_app/TemplateCmiiFrontend.go new file mode 100644 index 0000000..dace056 --- /dev/null +++ b/agent-operator/deploy/c_app/TemplateCmiiFrontend.go @@ -0,0 +1,103 @@ +package c_app + +const CmiiFrontendDeploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .AppName }} + namespace: {{ .Namespace }} + labels: + cmii.type: frontend + cmii.app: {{ .AppName }} + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: {{ .TagVersion }} +spec: + replicas: {{ .Replicas }} + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: {{ .AppName }} + template: + metadata: + labels: + cmii.type: frontend + cmii.app: {{ .AppName }} + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: {{ .AppName }} + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/{{ .AppName }}:{{ .ImageTag }} + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: {{ .Namespace }} + - name: APPLICATION_NAME + value: {{ .AppName }} + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + volumeMounts: + - name: nginx-conf + mountPath: /usr/local/nginx/conf/nginx.conf + subPath: nginx.conf + - name: default-nginx-conf + mountPath: /etc/nginx/conf.d/default.conf + subPath: default.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: default-nginx-conf + configMap: + name: default-nginx-cm + items: + - key: default.conf + path: default.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-{{ .ShortName }} + items: + - key: ingress-config.js + path: ingress-config.js +` + +const CmiiFrontendServiceTemplate = ` +apiVersion: v1 +kind: Service +metadata: + name: {{ .AppName }} + namespace: {{ .Namespace }} + labels: + cmii.type: frontend + cmii.app: {{ .AppName }} + octopus.control: frontend-app-wdd + app.kubernetes.io/version: {{ .TagVersion }} +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: {{ .AppName }} + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 +` diff --git a/agent-operator/deploy/c_app/TemplateCmiiSRS.go b/agent-operator/deploy/c_app/TemplateCmiiSRS.go new file mode 100644 index 0000000..929c68d --- /dev/null +++ b/agent-operator/deploy/c_app/TemplateCmiiSRS.go @@ -0,0 +1,502 @@ +package 
c_app + +const CmiiSrsTemplate = ` +kind: ConfigMap +apiVersion: v1 +metadata: + name: helm-live-srs-cm + namespace: {{ .Namespace }} + labels: + cmii.app: live-srs + cmii.type: live + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + helm.sh/chart: cmlc-live-srs-rtc-2.0.0 +data: + srs.rtc.conf: |- + listen 30935; + max_connections 4096; + srs_log_tank console; + srs_log_level info; + srs_log_file /home/srs.log; + daemon off; + http_api { + enabled on; + listen 1985; + crossdomain on; + } + stats { + network 0; + } + http_server { + enabled on; + listen 8080; + dir /home/hls; + } + srt_server { + enabled on; + listen 30556; + maxbw 1000000000; + connect_timeout 4000; + peerlatency 600; + recvlatency 600; + } + rtc_server { + enabled on; + listen 30090; + candidate $CANDIDATE; + } + vhost __defaultVhost__ { + http_hooks { + enabled on; + on_publish http://helm-live-op-svc-v2:8080/hooks/on_push; + } + http_remux { + enabled on; + } + rtc { + enabled on; + rtmp_to_rtc on; + rtc_to_rtmp on; + keep_bframe off; + } + tcp_nodelay on; + min_latency on; + play { + gop_cache off; + mw_latency 100; + mw_msgs 10; + } + publish { + firstpkt_timeout 8000; + normal_timeout 4000; + mr on; + } + dvr { + enabled off; + dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4; + dvr_plan session; + } + hls { + enabled on; + hls_path /home/hls; + hls_fragment 10; + hls_window 60; + hls_m3u8_file [app]/[stream].m3u8; + hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts; + hls_cleanup on; + hls_entry_prefix http://{{ .WebIP }}:{{ .WebPort }}; + } + } +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-srs-svc-exporter + namespace: {{ .Namespace }} + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - name: rtmp + protocol: TCP + port: 30935 + targetPort: 30935 + nodePort: 30935 + - name: rtc + protocol: UDP + port: 30090 + targetPort: 30090 + nodePort: 30090 + - name: rtc-tcp + protocol: TCP + port: 30090 + targetPort: 30090 + nodePort: 30090 + - name: srt + protocol: UDP + port: 30556 + targetPort: 30556 + nodePort: 30556 + - name: api + protocol: TCP + port: 1985 + targetPort: 1985 + nodePort: 30557 + selector: + srs-role: rtc + type: NodePort + sessionAffinity: None + externalTrafficPolicy: Cluster + +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-srs-svc + namespace: {{ .Namespace }} + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 + - name: api + protocol: TCP + port: 1985 + targetPort: 1985 + selector: + srs-role: rtc + type: ClusterIP + sessionAffinity: None + +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-srsrtc-svc + namespace: {{ .Namespace }} + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - name: rtmp + protocol: TCP + port: 30935 + targetPort: 30935 + selector: + srs-role: rtc + type: ClusterIP + sessionAffinity: None + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: helm-live-srs-rtc + namespace: {{ .Namespace }} + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + cmii.app: live-srs + cmii.type: live + helm.sh/chart: cmlc-live-srs-rtc-2.0.0 + srs-role: rtc +spec: + replicas: 1 + selector: + matchLabels: + srs-role: rtc + template: + metadata: + creationTimestamp: null + labels: + srs-role: rtc + spec: + volumes: + - name: srs-conf-file + configMap: + name: helm-live-srs-cm + items: + - key: 
srs.rtc.conf + path: docker.conf + defaultMode: 420 + - name: srs-vol + emptyDir: + sizeLimit: 8Gi + containers: + - name: srs-rtc + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/srs:v5.0.195 + ports: + - name: srs-rtmp + containerPort: 30935 + protocol: TCP + - name: srs-api + containerPort: 1985 + protocol: TCP + - name: srs-flv + containerPort: 8080 + protocol: TCP + - name: srs-webrtc + containerPort: 30090 + protocol: UDP + - name: srs-webrtc-tcp + containerPort: 30090 + protocol: TCP + - name: srs-srt + containerPort: 30556 + protocol: UDP + env: + - name: CANDIDATE + value: {{ .WebIP }} + resources: + limits: + cpu: 1200m + memory: 6Gi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: srs-conf-file + mountPath: /usr/local/srs/conf/docker.conf + subPath: docker.conf + - name: srs-vol + mountPath: /home/dvr + subPath: {{ .Namespace }}/helm-live/dvr + - name: srs-vol + mountPath: /home/hls + subPath: {{ .Namespace }}/helm-live/hls + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + - name: oss-adaptor + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/cmii-srs-oss-adaptor:2023-SA + env: + - name: OSS_ENDPOINT + value: 'http://{{ .MinioInnerIP }}:9000' + - name: OSS_AK + value: cmii + - name: OSS_SK + value: 'B#923fC7mk' + - name: OSS_BUCKET + value: live-cluster-hls + - name: SRS_OP + value: 'http://helm-live-op-svc-v2:8080' + - name: MYSQL_ENDPOINT + value: 'helm-mysql:3306' + - name: MYSQL_USERNAME + value: k8s_admin + - name: MYSQL_PASSWORD + value: fP#UaH6qQ3)8 + - name: MYSQL_DATABASE + value: cmii_live_srs_op + - name: MYSQL_TABLE + value: live_segment + - name: LOG_LEVEL + value: info + - name: OSS_META + value: 'yes' + resources: + limits: + cpu: 1200m + memory: 4Gi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: srs-vol + mountPath: /cmii/share/hls + subPath: {{ .Namespace }}/helm-live/hls + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + imagePullSecrets: + - name: harborsecret + affinity: {} + schedulerName: default-scheduler + serviceName: helm-live-srsrtc-svc + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: 0 + revisionHistoryLimit: 10 +--- +# live-srs部分 +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: helm-live-op-v2 + namespace: {{ .Namespace }} + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + cmii.app: live-engine + cmii.type: live + helm.sh/chart: cmlc-live-live-op-2.0.0 + live-role: op-v2 +spec: + replicas: 1 + selector: + matchLabels: + live-role: op-v2 + template: + metadata: + creationTimestamp: null + labels: + live-role: op-v2 + spec: + volumes: + - name: srs-conf-file + configMap: + name: helm-live-op-cm-v2 + items: + - key: live.op.conf + path: bootstrap.yaml + defaultMode: 420 + containers: + - name: helm-live-op-v2 + image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/cmii-live-operator:5.2.0 + ports: + - name: operator + containerPort: 8080 + protocol: TCP + resources: + limits: + cpu: 4800m + memory: 4Gi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: srs-conf-file + mountPath: /cmii/bootstrap.yaml + subPath: bootstrap.yaml + livenessProbe: + httpGet: + path: /cmii/ping + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + 
failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/ping + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + imagePullSecrets: + - name: harborsecret + affinity: {} + schedulerName: default-scheduler + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 25% + maxSurge: 25% + revisionHistoryLimit: 10 + progressDeadlineSeconds: 600 +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-op-svc-v2 + namespace: {{ .Namespace }} + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 + nodePort: 30333 + selector: + live-role: op-v2 + type: NodePort + sessionAffinity: None +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-op-svc + namespace: {{ .Namespace }} + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 + selector: + live-role: op + type: ClusterIP + sessionAffinity: None +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: helm-live-op-cm-v2 + namespace: {{ .Namespace }} + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + cmii.app: live-engine + cmii.type: live +data: + live.op.conf: |- + server: + port: 8080 + spring: + main: + allow-bean-definition-overriding: true + allow-circular-references: true + application: + name: cmii-live-operator + platform: + info: + name: cmii-live-operator + description: cmii-live-operator + version: {{ .TagVersion }} + scanPackage: com.cmii.live.op + cloud: + nacos: + config: + username: developer + password: N@cos14Good + server-addr: helm-nacos:8848 + extension-configs: + - data-id: cmii-live-operator.yml + group: {{ .TagVersion }} + refresh: true + shared-configs: + - data-id: cmii-backend-system.yml + group: {{ .TagVersion }} + refresh: true + discovery: + enabled: false + + live: + engine: + type: srs + endpoint: 'http://helm-live-srs-svc:1985' + + proto: + rtmp: 'rtmp://{{ .WebIP }}:30935' + rtsp: 'rtsp://{{ .WebIP }}:30554' + srt: 'srt://{{ .WebIP }}:30556' + flv: 'http://{{ .WebIP }}:30500' + hls: 'http://{{ .WebIP }}:30500' + rtc: 'webrtc://{{ .WebIP }}:30557' + replay: 'https://{{ .WebIP }}:30333' + minio: + endpoint: http://{{ .MinioInnerIP }}:9000 + access-key: cmii + secret-key: B#923fC7mk + bucket: live-cluster-hls +` diff --git a/agent-operator/deploy/c_app/TemplateIngressConfigMap.go b/agent-operator/deploy/c_app/TemplateIngressConfigMap.go new file mode 100644 index 0000000..0a29788 --- /dev/null +++ b/agent-operator/deploy/c_app/TemplateIngressConfigMap.go @@ -0,0 +1,610 @@ +package c_app + +const CmiiFrontendConfigMapTemplate = ` +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ .AppName }} + namespace: {{ .Namespace }} +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "{{ .TenantEnv }}", + CloudHOST: "{{ .WebIP }}:{{ .WebPort }}", + ApplicationShortName: "{{ .ShortName }}", + AppClientId: "{{ .ClientId }}" + } +` + +const CmiiFrontendDefaultNginxConfTemplate = ` +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-cm + namespace: {{ .Namespace }} + labels: + cmii.type: frontend +data: + nginx.conf: | + user root; + worker_processes auto; + + events { + 
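+        # per-worker event loop tuning; use_epoll below assumes a Linux host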
worker_connections 1024; + use_epoll on; + } + http { + include mime.types; + default_type application/octet-stream; + + sendfile on; + + keepalive_timeout 600; + + server { + listen 9528; + server_name localhost; + gzip on; + + location / { + root /home/cmii-platform/dist; + index index.html index.htm; + } + + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root html; + } + } + } +` + +const CmiiFrontendIngressTemplate = ` +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: frontend-applications-ingress + namespace: {{ .Namespace }} + labels: + type: frontend + octopus.control: all-ingress-config-wdd + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: {{ .TagVersion }} + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite ^(/green)$ $1/ redirect; + rewrite ^(/supervision)$ $1/ redirect; + rewrite ^(/inspection)$ $1/ redirect; + rewrite ^(/park)$ $1/ redirect; + rewrite ^(/pangu)$ $1/ redirect; + rewrite ^(/ai-brain)$ $1/ redirect; + rewrite ^(/base)$ $1/ redirect; + rewrite ^(/cms)$ $1/ redirect; + rewrite ^(/cmsportal)$ $1/ redirect; + rewrite ^(/detection)$ $1/ redirect; + rewrite ^(/emergency)$ $1/ redirect; + rewrite ^(/hyper)$ $1/ redirect; + rewrite ^(/logistics)$ $1/ redirect; + rewrite ^(/mws)$ $1/ redirect; + rewrite ^(/mws-admin)$ $1/ redirect; + rewrite ^(/oms)$ $1/ redirect; + rewrite ^(/open)$ $1/ redirect; + rewrite ^(/security)$ $1/ redirect; + rewrite ^(/share)$ $1/ redirect; + rewrite ^(/splice)$ $1/ redirect; + rewrite ^(/splice-visual)$ $1/ redirect; + rewrite ^(/traffic)$ $1/ redirect; + rewrite ^(/visualization)$ $1/ redirect; + rewrite ^(/communication)$ $1/ redirect; + rewrite ^(/infrastructure)$ $1/ redirect; + rewrite ^(/media)$ $1/ redirect; + rewrite ^(/seniclive)$ $1/ redirect; +spec: + rules: + - host: fake-domain.{{ .Namespace }}.io + http: + paths: + - path: /inspection/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /supervision/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-platform-supervision + servicePort: 9528 + - path: /supervisionh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-platform-supervisionh5 + servicePort: 9528 + - path: /green/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /park/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /pangu/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /emersupport/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /infrastructure/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-cms-portal + servicePort: 9528 + - path: /ai-brain/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-ai-brain + servicePort: 9528 + - path: /base/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-base + servicePort: 9528 + - path: /cms/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: 
cmii-uav-platform-cms + servicePort: 9528 + - path: /cmsportal/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-cms-portal + servicePort: 9528 + - path: /detection/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-detection + servicePort: 9528 + - path: /emergency/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-emergency-rescue + servicePort: 9528 + - path: /hyper/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-hyperspectral + servicePort: 9528 + - path: /logistics/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-logistics + servicePort: 9528 + - path: /mws/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-mws + servicePort: 9528 + - path: /mws-admin/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-mws-admin + servicePort: 9528 + - path: /oms/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-oms + servicePort: 9528 + - path: /open/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-open + servicePort: 9528 + - path: /security/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /share/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-share + servicePort: 9528 + - path: /splice/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-splice + servicePort: 9528 + - path: /splice-visual/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-splice-visual + servicePort: 9528 + - path: /traffic/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /visualization/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-visualization + servicePort: 9528 + - path: /communication/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /media/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-media + servicePort: 9528 + - path: /seniclive/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-seniclive + servicePort: 9528 + - path: /jiangsuwenlv/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-jiangsuwenlv + servicePort: 9528 + - path: /qinghaitourism/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-qinghaitourism + servicePort: 9528 + - path: /securityh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-securityh5 + servicePort: 9528 + - path: /fireRescue/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 +` + +const CmiiBackendIngressTemplate = ` +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: backend-applications-ingress + namespace: {{ .Namespace }} + labels: + type: backend + octopus.control: all-ingress-config-wdd + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: {{ .TagVersion }} + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" +spec: + rules: + - host: cmii-admin-data.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: 
ImplementationSpecific + backend: + serviceName: cmii-admin-data + servicePort: 8080 + - host: cmii-admin-gateway.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-gateway + servicePort: 8080 + - host: cmii-admin-user.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-user + servicePort: 8080 + - host: cmii-open-gateway.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-open-gateway + servicePort: 8080 + - host: cmii-uav-airspace.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-airspace + servicePort: 8080 + - host: cmii-uav-brain.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-brain + servicePort: 8080 + - host: cmii-uav-clusters.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-clusters + servicePort: 8080 + - host: cmii-uav-cms.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-cms + servicePort: 8080 + - host: cmii-uav-data-post-process.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-data-post-process + servicePort: 8080 + - host: cmii-uav-developer.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-developer + servicePort: 8080 + - host: cmii-uav-device.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-device + servicePort: 8080 + - host: cmii-uav-gateway.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gateway + servicePort: 8080 + - host: cmii-uav-industrial-portfolio.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-industrial-portfolio + servicePort: 8080 + - host: cmii-uav-kpi-monitor.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-kpi-monitor + servicePort: 8080 + - host: cmii-uav-live.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-live + servicePort: 8080 + - host: cmii-uav-cloud-live.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-cloud-live + servicePort: 8080 + - host: cmii-uav-logger.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-logger + servicePort: 8080 + - host: cmii-uav-material-warehouse.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-material-warehouse + servicePort: 8080 + - host: cmii-uav-mission.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-mission + servicePort: 8080 + - host: cmii-uav-monitor.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: 
ImplementationSpecific + backend: + serviceName: cmii-uav-monitor + servicePort: 8080 + - host: cmii-uav-mqtthandler.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-mqtthandler + servicePort: 8080 + - host: cmii-uav-notice.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-notice + servicePort: 8080 + - host: cmii-uav-oauth.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-oauth + servicePort: 8080 + - host: cmii-uav-process.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-process + servicePort: 8080 + - host: cmii-uav-security-system.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-security-system + servicePort: 8080 + - host: cmii-uav-surveillance.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-surveillance + servicePort: 8080 + - host: cmii-uav-user.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-user + servicePort: 8080 + - host: cmii-uav-waypoint.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-waypoint + servicePort: 8080 + - host: cmii-uav-alarm.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-alarm + servicePort: 8080 + - host: cmii-uav-emergency.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-emergency + servicePort: 8080 + - host: cmii-uav-integration.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-integration + servicePort: 8080 + - host: cmii-suav-supervision.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-supervision + servicePort: 8080 + - host: cmii-uav-gis-server.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gis-server + servicePort: 8080 + - host: cmii-uav-grid-datasource.uavcloud-{{ .Namespace }}.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-grid-datasource + servicePort: 8080 +` + +const CmiiGatewayIngressTemplate = ` +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: all-gateways-ingress + namespace: {{ .Namespace }} + labels: + type: api-gateway + octopus.control: all-ingress-config-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: {{ .TagVersion }} + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; +spec: + rules: + - host: fake-domain.{{ .Namespace }}.io + http: + paths: + - path: /oms/api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-gateway + servicePort: 8080 + - path: 
/open/api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-open-gateway + servicePort: 8080 + - path: /api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gateway + servicePort: 8080 +` diff --git a/agent-operator/deploy/z_dep/G.go b/agent-operator/deploy/z_dep/G.go new file mode 100644 index 0000000..d5cdf16 --- /dev/null +++ b/agent-operator/deploy/z_dep/G.go @@ -0,0 +1,119 @@ +package z_dep + +import ( + "bytes" + "fmt" + "github.com/go-playground/validator/v10" + "reflect" + "runtime" + "text/template" + "wdd.io/agent-common/assert" + "wdd.io/agent-common/logger" + "wdd.io/agent-common/utils" +) + +var ApplyFilePrefix = "" + +var Asserter = assert.Asserter + +var log = logger.Log + +func init() { + switch runtime.GOOS { + case "linux": + ApplyFilePrefix = "Linux value" + case "darwin": // macOS + ApplyFilePrefix = "MacOS value" + case "windows": + ApplyFilePrefix = "C:\\Users\\wddsh\\Documents\\IdeaProjects\\ProjectOctopus\\agent-operator\\deploy\\z_file\\" + default: + ApplyFilePrefix = "Unknown OS" + } + + fmt.Printf("ApplyFilePrefix: %s\n", ApplyFilePrefix) +} + +type CommonEnvironmentConfig struct { + WebIP string `json:"web_ip,omitempty" valid:"required"` //A1C1IP + WebPort string `json:"web_port,omitempty" valid:"required"` //A1C1JS + HarborIP string `json:"harbor_ip,omitempty" valid:"required"` //A1C2IP + HarborPort string `json:"harbor_port,omitempty" valid:"required"` // default 8033 + Namespace string `json:"namespace,omitempty" valid:"required"` // SUPREME + TagVersion string `json:"tag_version,omitempty" valid:"required"` // KIMMY + TenantEnv string `json:"tenant_env,omitempty"` // TENANT_ENV 只在内部使用 + MinioPublicIP string `json:"minio_public_ip,omitempty"` // M2C1IP + MinioInnerIP string `json:"minio_inner_ip,omitempty"` // M2D2IP + NFSServerIP string `json:"nfs_server_ip,omitempty"` // N1C2IP +} + +//func (env *CommonEnvironmentConfig) CompactEnv() { +// +// copySameFields(env, c_app.DefaultCmiiBackendConfig) +// copySameFields(env, c_app.DefaultCmiiFrontendConfig) +//} +// +// + +func (env *CommonEnvironmentConfig) ValidateAndUniform() bool { + validate := validator.New() + err := validate.Struct(env) + if err != nil { + fmt.Printf("backend config validate error: %v\n", err) + return false + } + + // uniform all + if env.MinioInnerIP == "" { + env.MinioInnerIP = env.HarborIP + } + + if env.MinioPublicIP == "" { + env.MinioPublicIP = env.WebIP + } + + return true +} + +func (env *CommonEnvironmentConfig) ParseCommonEnvToApplyFile(applyTemplate string, applyFilePath string) bool { + + return ParseEnvToApplyFile(env, applyTemplate, applyFilePath) +} + +func ParseEnvToApplyFile(environment any, applyTemplate string, applyFilePath string) bool { + + randomString := utils.GenerateRandomString(8) + + // Deployment + tmpl, err := template.New(randomString).Parse(applyTemplate) + if err != nil { + log.ErrorF("parse template error: %v", err) + return false + } + // 应用数据并打印结果 + var result bytes.Buffer + err = tmpl.Execute(&result, environment) + if err != nil { + log.ErrorF("template execute error: %v", err) + return false + } + + // append to file + if !utils.AppendContentWithSplitLineToFile(result.String(), applyFilePath) { + return false + } + + return true +} + +// CopySameFields 利用反射,将a中的所有同名字段的值 复制到b中的对应字段 +func CopySameFields(a, b interface{}) { + va := reflect.ValueOf(a).Elem() + vb := reflect.ValueOf(b).Elem() + + for i := 0; i < va.NumField(); i++ { + fieldName := va.Type().Field(i).Name + if 
+ if vb.FieldByName(fieldName).IsValid() { + vb.FieldByName(fieldName).Set(va.Field(i)) + } + } } diff --git a/agent-operator/deploy/z_file/k8s-backend.yaml b/agent-operator/deploy/z_file/k8s-backend.yaml new file mode 100644 index 0000000..614fcbf --- /dev/null +++ b/agent-operator/deploy/z_file/k8s-backend.yaml @@ -0,0 +1,417 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-admin-data + namespace: zjjt + labels: + cmii.type: backend + cmii.app: cmii-admin-data + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.5.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-admin-data + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-admin-data + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-admin-data + image: 10.100.2.121:8033/cmii/cmii-admin-data:5.2.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjjt + - name: APPLICATION_NAME + value: cmii-admin-data + - name: CUST_JAVA_OPTS + value: "-Xms500m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.5.0 + - name: SYS_CONFIG_GROUP + value: 5.5.0 + - name: IMAGE_VERSION + value: 5.5.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits:
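+ # limits below are the hard per-container cap enforced at runtime; the smaller requests are what the scheduler actually reserves on a node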
+ memory: 2Gi + cpu: "2" + requests: + memory: 1Gi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/ping + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/ping + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/ping + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: glusterfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjjt/cmii-admin-data + volumes: + - name: glusterfs-backend-log-volume + persistentVolumeClaim: + claimName: glusterfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-admin-data + namespace: zjjt + labels: + cmii.type: backend + cmii.app: cmii-admin-data + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.5.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-admin-data + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-admin-gateway + namespace: zjjt + labels: + cmii.type: backend + cmii.app: cmii-admin-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.5.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-admin-gateway + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-admin-gateway + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-admin-gateway + image: 10.100.2.121:8033/cmii/cmii-admin-gateway:5.2.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjjt + - name: APPLICATION_NAME + value: cmii-admin-gateway + - name: CUST_JAVA_OPTS + value: "-Xms500m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.5.0 + - name: SYS_CONFIG_GROUP + value: 5.5.0 + - name: IMAGE_VERSION + value: 5.5.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits:
+ memory: 2Gi + cpu: "2" + requests: + memory: 1Gi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/ping + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/ping + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/ping + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: glusterfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjjt/cmii-admin-gateway + volumes: + - name: glusterfs-backend-log-volume + persistentVolumeClaim: + claimName: glusterfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-admin-gateway + namespace: zjjt + labels: + cmii.type: backend + cmii.app: cmii-admin-gateway + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.5.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-admin-gateway + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-admin-user + namespace: zjjt + labels: + cmii.type: backend + cmii.app: cmii-admin-user + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.5.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: backend + cmii.app: cmii-admin-user + template: + metadata: + labels: + cmii.type: backend + cmii.app: cmii-admin-user + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: uavcloud.env + operator: In + values: + - demo + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-admin-user + image: 10.100.2.121:8033/cmii/cmii-admin-user:5.2.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjjt + - name: APPLICATION_NAME + value: cmii-admin-user + - name: CUST_JAVA_OPTS + value: "-Xms500m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true" + - name: NACOS_REGISTRY + value: "helm-nacos:8848" + - name: NACOS_DISCOVERY_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NACOS_DISCOVERY_PORT + value: "8080" + - name: BIZ_CONFIG_GROUP + value: 5.5.0 + - name: SYS_CONFIG_GROUP + value: 5.5.0 + - name: IMAGE_VERSION + value: 5.5.0 + - name: NACOS_USERNAME + value: "developer" + - name: NACOS_PASSWORD + value: "Deve@9128201" + ports: + - name: pod-port + containerPort: 8080 + protocol: TCP + resources: + limits:
+ memory: 2Gi + cpu: "2" + requests: + memory: 1Gi + cpu: 200m + livenessProbe: + httpGet: + path: /cmii/ping + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/ping + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + startupProbe: + httpGet: + path: /cmii/ping + port: pod-port + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 3 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: glusterfs-backend-log-volume + mountPath: /cmii/logs + readOnly: false + subPath: zjjt/cmii-admin-user + volumes: + - name: glusterfs-backend-log-volume + persistentVolumeClaim: + claimName: glusterfs-backend-log-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-admin-user + namespace: zjjt + labels: + cmii.type: backend + cmii.app: cmii-admin-user + octopus/control: backend-app-1.0.0 + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/app-version: 5.5.0 +spec: + type: ClusterIP + selector: + cmii.type: backend + cmii.app: cmii-admin-user + ports: + - name: backend-tcp + port: 8080 + protocol: TCP + targetPort: 8080 diff --git a/agent-operator/deploy/z_file/k8s-configmap.yaml b/agent-operator/deploy/z_file/k8s-configmap.yaml new file mode 100644 index 0000000..e6e15bb --- /dev/null +++ b/agent-operator/deploy/z_file/k8s-configmap.yaml @@ -0,0 +1,294 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-traffic + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "traffic", + AppClientId: "APP_Jc8i2wOQ1t73QEJS" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-base + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "base", + AppClientId: "APP_9LY41OaKSqk2btY0" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-cms-portal + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "cmsportal", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-oms + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "oms", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-splice + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "splice", + AppClientId: "APP_zE0M3sTRXrCIJS8Y" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-detection + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "detection", + AppClientId: "APP_FDHW2VLVDWPnnOCy" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-media + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "media", + 
AppClientId: "APP_4AU8lbifESQO4FD6" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-multiterminal + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "multiterminal", + AppClientId: "APP_PvdfRRRBPL8xbIwl" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-security + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "security", + AppClientId: "APP_JUSEMc7afyWXxvE7" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-suav-platform-supervision + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "supervision", + AppClientId: "APP_qqSu82THfexI8PLM" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-suav-platform-supervisionh5 + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "supervisionh5", + AppClientId: "APP_qqSu82THfexI8PLM" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "platform", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-ai-brain + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "ai-brain", + AppClientId: "APP_rafnuCAmBESIVYMH" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-securityh5 + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "securityh5", + AppClientId: "APP_N3ImO0Ubfu9peRHD" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-seniclive + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "seniclive", + AppClientId: "empty" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-share + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "share", + AppClientId: "APP_4lVSVI0ZGxTssir8" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-armypeople + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "armypeople", + AppClientId: "APP_UIegse6Lfou9pO1U" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-logistics + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "logistics", + AppClientId: "APP_PvdfRRRBPL8xbIwl" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-mws + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + 
ApplicationShortName: "mws", + AppClientId: "APP_uKniXPELlRERBBwK" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-emergency-rescue + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "emergency", + AppClientId: "APP_aGsTAY1uMZrpKdfk" + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cmii-uav-platform-open + namespace: zjjt +data: + ingress-config.js: |- + var GlobalIngressConfig = { + TenantEnvironment: "", + CloudHOST: "10.100.2.121:8888", + ApplicationShortName: "open", + AppClientId: "empty" + } diff --git a/agent-operator/deploy/z_file/k8s-dashboard.yaml b/agent-operator/deploy/z_file/k8s-dashboard.yaml new file mode 100644 index 0000000..a4b294d --- /dev/null +++ b/agent-operator/deploy/z_file/k8s-dashboard.yaml @@ -0,0 +1,307 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + kubernetes.io/cluster-service: "true" + name: kubernetes-dashboard + namespace: kube-system +spec: + ports: + - port: 443 + targetPort: 8443 + nodePort: 30554 + selector: + k8s-app: kubernetes-dashboard + type: NodePort + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: kube-system +type: Opaque + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-csrf + namespace: kube-system +type: Opaque +data: + csrf: "" + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-key-holder + namespace: kube-system +type: Opaque + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-settings + namespace: kube-system + +--- + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +rules: + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. + - apiGroups: [ "" ] + resources: [ "secrets" ] + resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf" ] + verbs: [ "get", "update", "delete" ] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. + - apiGroups: [ "" ] + resources: [ "configmaps" ] + resourceNames: [ "kubernetes-dashboard-settings" ] + verbs: [ "get", "update" ] + # Allow Dashboard to get metrics. 
+ - apiGroups: [ "" ] + resources: [ "services" ] + resourceNames: [ "heapster", "dashboard-metrics-scraper" ] + verbs: [ "proxy" ] + - apiGroups: [ "" ] + resources: [ "services/proxy" ] + resourceNames: [ "heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper" ] + verbs: [ "get" ] + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: [ "metrics.k8s.io" ] + resources: [ "pods", "nodes" ] + verbs: [ "get", "list", "watch" ] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + containers: + - name: kubernetes-dashboard + image: 10.100.2.121:8033/cmii/dashboard:v2.0.1 + ports: + - containerPort: 8443 + protocol: TCP + args: + - --auto-generate-certificates + - --namespace=kube-system + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. 
+ # - --apiserver-host=http://my-address:port + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: kubernetes-dashboard-certs + - name: tmp-volume + emptyDir: { } + serviceAccountName: kubernetes-dashboard + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kube-system +spec: + ports: + - port: 8000 + targetPort: 8000 + selector: + k8s-app: dashboard-metrics-scraper + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: dashboard-metrics-scraper + template: + metadata: + labels: + k8s-app: dashboard-metrics-scraper + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + spec: + containers: + - name: dashboard-metrics-scraper + image: 10.100.2.121:8033/cmii/metrics-scraper:v1.0.4 + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTP + path: / + port: 8000 + initialDelaySeconds: 30 + timeoutSeconds: 30 + volumeMounts: + - mountPath: /tmp + name: tmp-volume + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + serviceAccountName: kubernetes-dashboard + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + volumes: + - name: tmp-volume + emptyDir: { } +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: admin-user + namespace: kube-system + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: admin-user +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: admin-user + namespace: kube-system diff --git a/agent-operator/deploy/z_file/k8s-emqx.yaml b/agent-operator/deploy/z_file/k8s-emqx.yaml new file mode 100644 index 0000000..7e61420 --- /dev/null +++ b/agent-operator/deploy/z_file/k8s-emqx.yaml @@ -0,0 +1,263 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-emqxs + namespace: zjjt +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-emqxs-env + namespace: zjjt + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.5.0 +data: + EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc.cluster.local:443 + EMQX_NAME: helm-emqxs + EMQX_CLUSTER__DISCOVERY: k8s + EMQX_CLUSTER__K8S__APP_NAME: helm-emqxs + EMQX_CLUSTER__K8S__SERVICE_NAME: helm-emqxs-headless + EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns" + EMQX_CLUSTER__K8S__namespace: zjjt + EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local + EMQX_ALLOW_ANONYMOUS: "false" + EMQX_ACL_NOMATCH: 
"deny" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-emqxs-cm + namespace: zjjt + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.5.0 +data: + emqx_auth_username.conf: |- + auth.user.1.username = cmlc + auth.user.1.password = odD8#Ve7.B + auth.user.password_hash = sha256 + + acl.conf: |- + {allow, {user, "admin"}, pubsub, ["admin/#"]}. + {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}. + {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}. + {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}. + {allow, all}. + + loaded_plugins: |- + {emqx_auth_username,true}. + {emqx_management, true}. + {emqx_recon, true}. + {emqx_retainer, false}. + {emqx_dashboard, true}. + {emqx_telemetry, true}. + {emqx_rule_engine, true}. + {emqx_bridge_mqtt, false}. +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-emqxs + namespace: zjjt + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.5.0 +spec: + replicas: 1 + serviceName: helm-emqxs-headless + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + template: + metadata: + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.5.0 + spec: + affinity: { } + serviceAccountName: helm-emqxs + containers: + - name: helm-emqxs + image: 10.100.2.121:8033/cmii/emqx:5.5.1 + imagePullPolicy: Always + ports: + - name: mqtt + containerPort: 1883 + - name: mqttssl + containerPort: 8883 + - name: mgmt + containerPort: 8081 + - name: ws + containerPort: 8083 + - name: wss + containerPort: 8084 + - name: dashboard + containerPort: 18083 + - name: ekka + containerPort: 4370 + envFrom: + - configMapRef: + name: helm-emqxs-env + resources: { } + volumeMounts: + - name: emqx-data + mountPath: "/opt/emqx/data/mnesia" + readOnly: false + - name: helm-emqxs-cm + mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf" + subPath: emqx_auth_username.conf + readOnly: false + # - name: helm-emqxs-cm + # mountPath: "/opt/emqx/etc/acl.conf" + # subPath: "acl.conf" + # readOnly: false + - name: helm-emqxs-cm + mountPath: "/opt/emqx/data/loaded_plugins" + subPath: loaded_plugins + readOnly: false + volumes: + - name: emqx-data + persistentVolumeClaim: + claimName: helm-emqxs + - name: helm-emqxs-cm + configMap: + name: helm-emqxs-cm + items: + - key: emqx_auth_username.conf + path: emqx_auth_username.conf + - key: acl.conf + path: acl.conf + - key: loaded_plugins + path: loaded_plugins +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-emqxs + namespace: zjjt +rules: + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - watch + - list +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-emqxs + namespace: zjjt +subjects: + - kind: ServiceAccount + name: helm-emqxs + namespace: zjjt +roleRef: + kind: Role + name: helm-emqxs + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-emqxs + namespace: zjjt + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: 
emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.5.0 +spec: + type: NodePort + selector: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + ports: + - port: 1883 + name: mqtt + targetPort: 1883 + nodePort: 31883 + - port: 18083 + name: dashboard + targetPort: 18083 + nodePort: 38085 + - port: 8083 + name: mqtt-websocket + targetPort: 8083 + nodePort: 38083 +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-emqxs-headless + namespace: zjjt + labels: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + helm.sh/chart: emqx-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.5.0 +spec: + type: ClusterIP + clusterIP: None + selector: + cmii.type: middleware + cmii.app: helm-emqxs + cmii.emqx.architecture: cluster + ports: + - name: mqtt + port: 1883 + protocol: TCP + targetPort: 1883 + - name: mqttssl + port: 8883 + protocol: TCP + targetPort: 8883 + - name: mgmt + port: 8081 + protocol: TCP + targetPort: 8081 + - name: websocket + port: 8083 + protocol: TCP + targetPort: 8083 + - name: wss + port: 8084 + protocol: TCP + targetPort: 8084 + - name: dashboard + port: 18083 + protocol: TCP + targetPort: 18083 + - name: ekka + port: 4370 + protocol: TCP + targetPort: 4370 diff --git a/agent-operator/deploy/z_file/k8s-frontend.yaml b/agent-operator/deploy/z_file/k8s-frontend.yaml new file mode 100644 index 0000000..0332c1d --- /dev/null +++ b/agent-operator/deploy/z_file/k8s-frontend.yaml @@ -0,0 +1,98 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmii-admin-web + namespace: zjjt + labels: + cmii.type: frontend + cmii.app: cmii-admin-web + octopus.control: frontend-app-wdd + app.kubernetes.io/app-version: 5.5.0 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + cmii.type: frontend + cmii.app: cmii-admin-web + template: + metadata: + labels: + cmii.type: frontend + cmii.app: cmii-admin-web + spec: + imagePullSecrets: + - name: harborsecret + containers: + - name: cmii-admin-web + image: 10.100.2.121:8033/cmii/cmii-admin-web:5.2.0 + imagePullPolicy: Always + env: + - name: K8S_NAMESPACE + value: zjjt + - name: APPLICATION_NAME + value: cmii-admin-web + ports: + - name: platform-9528 + containerPort: 9528 + protocol: TCP + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + volumeMounts: + - name: nginx-conf + mountPath: /usr/local/nginx/conf/nginx.conf + subPath: nginx.conf + - name: default-nginx-conf + mountPath: /etc/nginx/conf.d/default.conf + subPath: default.conf + - name: tenant-prefix + subPath: ingress-config.js + mountPath: /home/cmii-platform/dist/ingress-config.js + volumes: + - name: nginx-conf + configMap: + name: nginx-cm + items: + - key: nginx.conf + path: nginx.conf + - name: default-nginx-conf + configMap: + name: default-nginx-cm + items: + - key: default.conf + path: default.conf + - name: tenant-prefix + configMap: + name: tenant-prefix-open + items: + - key: ingress-config.js + path: ingress-config.js +--- +apiVersion: v1 +kind: Service +metadata: + name: cmii-admin-web + namespace: zjjt + labels: + cmii.type: frontend + cmii.app: cmii-admin-web + octopus.control: frontend-app-wdd + app.kubernetes.io/version: 5.5.0 +spec: + type: ClusterIP + selector: + cmii.type: frontend + cmii.app: cmii-admin-web + ports: + - name: web-svc-port + port: 9528 + protocol: TCP + targetPort: 9528 diff --git 
a/agent-operator/deploy/z_file/k8s-ingress.yaml b/agent-operator/deploy/z_file/k8s-ingress.yaml new file mode 100644 index 0000000..98e411b --- /dev/null +++ b/agent-operator/deploy/z_file/k8s-ingress.yaml @@ -0,0 +1,544 @@ +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: frontend-applications-ingress + namespace: zjjt + labels: + type: frontend + octopus.control: all-ingress-config-wdd + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.5.0 + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite ^(/green)$ $1/ redirect; + rewrite ^(/supervision)$ $1/ redirect; + rewrite ^(/inspection)$ $1/ redirect; + rewrite ^(/park)$ $1/ redirect; + rewrite ^(/pangu)$ $1/ redirect; + rewrite ^(/ai-brain)$ $1/ redirect; + rewrite ^(/base)$ $1/ redirect; + rewrite ^(/cms)$ $1/ redirect; + rewrite ^(/cmsportal)$ $1/ redirect; + rewrite ^(/detection)$ $1/ redirect; + rewrite ^(/emergency)$ $1/ redirect; + rewrite ^(/hyper)$ $1/ redirect; + rewrite ^(/logistics)$ $1/ redirect; + rewrite ^(/mws)$ $1/ redirect; + rewrite ^(/mws-admin)$ $1/ redirect; + rewrite ^(/oms)$ $1/ redirect; + rewrite ^(/open)$ $1/ redirect; + rewrite ^(/security)$ $1/ redirect; + rewrite ^(/share)$ $1/ redirect; + rewrite ^(/splice)$ $1/ redirect; + rewrite ^(/splice-visual)$ $1/ redirect; + rewrite ^(/traffic)$ $1/ redirect; + rewrite ^(/visualization)$ $1/ redirect; + rewrite ^(/communication)$ $1/ redirect; + rewrite ^(/infrastructure)$ $1/ redirect; + rewrite ^(/media)$ $1/ redirect; + rewrite ^(/seniclive)$ $1/ redirect; +spec: + rules: + - host: fake-domain.zjjt.io + http: + paths: + - path: /inspection/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /supervision/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-platform-supervision + servicePort: 9528 + - path: /supervisionh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-platform-supervisionh5 + servicePort: 9528 + - path: /green/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /park/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /pangu/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /emersupport/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /infrastructure/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-cms-portal + servicePort: 9528 + - path: /ai-brain/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-ai-brain + servicePort: 9528 + - path: /base/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-base + servicePort: 9528 + - path: /cms/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-cms + servicePort: 9528 + - path: /cmsportal/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-cms-portal + servicePort: 9528 + - path: /detection/?(.*) + pathType: ImplementationSpecific + backend: + 
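+ # serviceName/servicePort is the networking.k8s.io/v1beta1 backend form used throughout this file; networking.k8s.io/v1 replaces it with service.name and service.port.number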
serviceName: cmii-uav-platform-detection + servicePort: 9528 + - path: /emergency/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-emergency-rescue + servicePort: 9528 + - path: /hyper/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-hyperspectral + servicePort: 9528 + - path: /logistics/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-logistics + servicePort: 9528 + - path: /mws/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-mws + servicePort: 9528 + - path: /mws-admin/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-mws-admin + servicePort: 9528 + - path: /oms/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-oms + servicePort: 9528 + - path: /open/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-open + servicePort: 9528 + - path: /security/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /share/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-share + servicePort: 9528 + - path: /splice/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-splice + servicePort: 9528 + - path: /splice-visual/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-splice-visual + servicePort: 9528 + - path: /traffic/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /visualization/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-visualization + servicePort: 9528 + - path: /communication/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 + - path: /media/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-media + servicePort: 9528 + - path: /seniclive/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-seniclive + servicePort: 9528 + - path: /jiangsuwenlv/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-jiangsuwenlv + servicePort: 9528 + - path: /qinghaitourism/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-qinghaitourism + servicePort: 9528 + - path: /securityh5/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform-securityh5 + servicePort: 9528 + - path: /fireRescue/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-platform + servicePort: 9528 +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: backend-applications-ingress + namespace: zjjt + labels: + type: backend + octopus.control: all-ingress-config-wdd + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.5.0 + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" +spec: + rules: + - host: cmii-admin-data.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-data + servicePort: 8080 + - host: cmii-admin-gateway.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-gateway + servicePort: 8080 + - host: cmii-admin-user.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: 
ImplementationSpecific + backend: + serviceName: cmii-admin-user + servicePort: 8080 + - host: cmii-open-gateway.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-open-gateway + servicePort: 8080 + - host: cmii-uav-airspace.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-airspace + servicePort: 8080 + - host: cmii-uav-brain.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-brain + servicePort: 8080 + - host: cmii-uav-clusters.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-clusters + servicePort: 8080 + - host: cmii-uav-cms.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-cms + servicePort: 8080 + - host: cmii-uav-data-post-process.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-data-post-process + servicePort: 8080 + - host: cmii-uav-developer.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-developer + servicePort: 8080 + - host: cmii-uav-device.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-device + servicePort: 8080 + - host: cmii-uav-gateway.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gateway + servicePort: 8080 + - host: cmii-uav-industrial-portfolio.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-industrial-portfolio + servicePort: 8080 + - host: cmii-uav-kpi-monitor.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-kpi-monitor + servicePort: 8080 + - host: cmii-uav-live.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-live + servicePort: 8080 + - host: cmii-uav-cloud-live.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-cloud-live + servicePort: 8080 + - host: cmii-uav-logger.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-logger + servicePort: 8080 + - host: cmii-uav-material-warehouse.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-material-warehouse + servicePort: 8080 + - host: cmii-uav-mission.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-mission + servicePort: 8080 + - host: cmii-uav-monitor.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-monitor + servicePort: 8080 + - host: cmii-uav-mqtthandler.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-mqtthandler + servicePort: 8080 + - host: cmii-uav-notice.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-notice + servicePort: 8080 + - host: cmii-uav-oauth.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: 
cmii-uav-oauth + servicePort: 8080 + - host: cmii-uav-process.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-process + servicePort: 8080 + - host: cmii-uav-security-system.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-security-system + servicePort: 8080 + - host: cmii-uav-surveillance.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-surveillance + servicePort: 8080 + - host: cmii-uav-user.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-user + servicePort: 8080 + - host: cmii-uav-waypoint.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-waypoint + servicePort: 8080 + - host: cmii-uav-alarm.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-alarm + servicePort: 8080 + - host: cmii-uav-emergency.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-emergency + servicePort: 8080 + - host: cmii-uav-integration.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-integration + servicePort: 8080 + - host: cmii-suav-supervision.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-suav-supervision + servicePort: 8080 + - host: cmii-uav-gis-server.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gis-server + servicePort: 8080 + - host: cmii-uav-grid-datasource.uavcloud-zjjt.io + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-grid-datasource + servicePort: 8080 +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: all-gateways-ingress + namespace: zjjt + labels: + type: api-gateway + octopus.control: all-ingress-config-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.5.0 + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; +spec: + rules: + - host: fake-domain.zjjt.io + http: + paths: + - path: /oms/api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-admin-gateway + servicePort: 8080 + - path: /open/api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-open-gateway + servicePort: 8080 + - path: /api/?(.*) + pathType: ImplementationSpecific + backend: + serviceName: cmii-uav-gateway + servicePort: 8080 diff --git a/agent-operator/deploy/z_file/k8s-mongo.yaml b/agent-operator/deploy/z_file/k8s-mongo.yaml new file mode 100644 index 0000000..c5fb26c --- /dev/null +++ b/agent-operator/deploy/z_file/k8s-mongo.yaml @@ -0,0 +1,75 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mongo + namespace: zjjt + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.5.0 +spec: + type: ClusterIP + selector: + cmii.app: helm-mongo + cmii.type: middleware + ports: + - 
port: 27017 + name: server-27017 + targetPort: 27017 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-mongo + namespace: zjjt + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.5.0 +spec: + serviceName: helm-mongo + replicas: 1 + selector: + matchLabels: + cmii.app: helm-mongo + cmii.type: middleware + template: + metadata: + labels: + cmii.app: helm-mongo + cmii.type: middleware + helm.sh/chart: mongo-1.1.0 + app.kubernetes.io/managed-by: octopus-control + app.kubernetes.io/version: 5.5.0 + annotations: + pod.alpha.kubernetes.io/initialized: "true" + spec: + affinity: { } + containers: + - name: helm-mongo + image: 10.100.2.121:8033/cmii/mongo:5.0 + resources: { } + ports: + - containerPort: 27017 + name: mongo27017 + protocol: TCP + env: + - name: MONGO_INITDB_ROOT_USERNAME + value: cmlc + - name: MONGO_INITDB_ROOT_PASSWORD + value: REdPza8#oVlt + volumeMounts: + - name: mongo-data + mountPath: /data/db + readOnly: false + subPath: default/helm-mongo/data/db + volumes: + - name: mongo-data + persistentVolumeClaim: + claimName: helm-mongo +--- diff --git a/agent-operator/deploy/z_file/k8s-mysql.yaml b/agent-operator/deploy/z_file/k8s-mysql.yaml new file mode 100644 index 0000000..0ce3e8e --- /dev/null +++ b/agent-operator/deploy/z_file/k8s-mysql.yaml @@ -0,0 +1,421 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-mysql + namespace: zjjt + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: octopus + annotations: +secrets: + - name: helm-mysql +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-mysql + namespace: zjjt + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: octopus +type: Opaque +data: + mysql-root-password: "UXpmWFFoZDNiUQ==" + mysql-password: "S0F0cm5PckFKNw==" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-mysql + namespace: zjjt + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: primary +data: + my.cnf: |- + + [mysqld] + port=3306 + basedir=/opt/bitnami/mysql + datadir=/bitnami/mysql/data + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + socket=/opt/bitnami/mysql/tmp/mysql.sock + log-error=/bitnami/mysql/data/error.log + general_log_file = /bitnami/mysql/data/general.log + slow_query_log_file = /bitnami/mysql/data/slow.log + innodb_data_file_path = ibdata1:512M:autoextend + innodb_buffer_pool_size = 512M + innodb_buffer_pool_instances = 2 + innodb_log_file_size = 512M + innodb_log_files_in_group = 4 + log-bin = /bitnami/mysql/data/mysql-bin + max_binlog_size=1G + transaction_isolation = REPEATABLE-READ + default_storage_engine = innodb + character-set-server = utf8mb4 + collation-server=utf8mb4_bin + binlog_format = ROW + binlog_rows_query_log_events=on + binlog_cache_size=4M + binlog_expire_logs_seconds = 1296000 + max_binlog_cache_size=2G + gtid_mode = on + enforce_gtid_consistency = 1 + sync_binlog = 1 + innodb_flush_log_at_trx_commit = 1 + innodb_flush_method = O_DIRECT + log_slave_updates=1 + relay_log_recovery = 1 + relay-log-purge = 1 + default_time_zone = '+08:00' + lower_case_table_names=1 + log_bin_trust_function_creators=1 + group_concat_max_len=67108864 +
innodb_io_capacity = 4000 + innodb_io_capacity_max = 8000 + innodb_flush_sync = 0 + innodb_flush_neighbors = 0 + innodb_write_io_threads = 8 + innodb_read_io_threads = 8 + innodb_purge_threads = 4 + innodb_page_cleaners = 4 + innodb_open_files = 65535 + innodb_max_dirty_pages_pct = 50 + innodb_lru_scan_depth = 4000 + innodb_checksum_algorithm = crc32 + innodb_lock_wait_timeout = 10 + innodb_rollback_on_timeout = 1 + innodb_print_all_deadlocks = 1 + innodb_file_per_table = 1 + innodb_online_alter_log_max_size = 4G + innodb_stats_on_metadata = 0 + innodb_thread_concurrency = 0 + innodb_sync_spin_loops = 100 + innodb_spin_wait_delay = 30 + lock_wait_timeout = 3600 + slow_query_log = 1 + long_query_time = 10 + log_queries_not_using_indexes =1 + log_throttle_queries_not_using_indexes = 60 + min_examined_row_limit = 100 + log_slow_admin_statements = 1 + log_slow_slave_statements = 1 + default_authentication_plugin=mysql_native_password + skip-name-resolve=1 + explicit_defaults_for_timestamp=1 + plugin_dir=/opt/bitnami/mysql/plugin + max_allowed_packet=128M + max_connections = 2000 + max_connect_errors = 1000000 + table_definition_cache=2000 + table_open_cache_instances=64 + tablespace_definition_cache=1024 + thread_cache_size=256 + interactive_timeout = 600 + wait_timeout = 600 + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=32M + bind-address=0.0.0.0 + performance_schema = 1 + performance_schema_instrument = '%memory%=on' + performance_schema_instrument = '%lock%=on' + innodb_monitor_enable=ALL + + [mysql] + no-auto-rehash + + [mysqldump] + quick + max_allowed_packet = 32M + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mysql/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-mysql-init-scripts + namespace: zjjt + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: primary +data: + create_users_grants_core.sql: |- + create + user zyly@'%' identified by 'Cmii@451315'; + grant select on *.* to zyly@'%'; + create + user zyly_qc@'%' identified by 'Uh)E_owCyb16'; + grant all + on *.* to zyly_qc@'%'; + create + user k8s_admin@'%' identified by 'fP#UaH6qQ3)8'; + grant all + on *.* to k8s_admin@'%'; + create + user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH'; + grant all + on *.* to audit_dba@'%'; + create + user db_backup@'%' identified by 'RU5Pu(4FGdT9'; + GRANT + SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT + on *.* to db_backup@'%'; + create + user monitor@'%' identified by 'PL3#nGtrWbf-'; + grant REPLICATION + CLIENT on *.* to monitor@'%'; + flush + privileges; +--- +kind: Service +apiVersion: v1 +metadata: + name: cmii-mysql + namespace: zjjt + labels: + app.kubernetes.io/component: primary + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: zjjt + cmii.app: mysql + cmii.type: middleware + octopus.control: mysql-db-wdd +spec: + ports: + - name: mysql + protocol: TCP + port: 13306 + targetPort: mysql + selector: + app.kubernetes.io/component: primary + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: zjjt + cmii.app: mysql + cmii.type: middleware + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mysql-headless + namespace: zjjt + labels: + 
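+ # clusterIP: None makes this a headless Service: each StatefulSet pod gets a stable DNS record, and publishNotReadyAddresses: true publishes those records even before the pods pass readiness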
app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + annotations: +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mysql + port: 3306 + targetPort: mysql + selector: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: zjjt + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-mysql + namespace: zjjt + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + annotations: +spec: + type: NodePort + ports: + - name: mysql + port: 3306 + protocol: TCP + targetPort: mysql + nodePort: 33306 + selector: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: zjjt + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-mysql + namespace: zjjt + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mysql-db + app.kubernetes.io/release: zjjt + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + serviceName: helm-mysql + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd + labels: + app.kubernetes.io/name: mysql-db + octopus.control: mysql-db-wdd + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: mysql + app.kubernetes.io/component: primary + spec: + serviceAccountName: helm-mysql + affinity: { } + nodeSelector: + mysql-deploy: "true" + securityContext: + fsGroup: 1001 + initContainers: + - name: change-volume-permissions + image: 10.100.2.121:8033/cmii/bitnami-shell:11-debian-11-r136 + imagePullPolicy: "Always" + command: + - /bin/bash + - -ec + - | + chown -R 1001:1001 /bitnami/mysql + securityContext: + runAsUser: 0 + volumeMounts: + - name: mysql-data + mountPath: /bitnami/mysql + containers: + - name: mysql + image: 10.100.2.121:8033/cmii/mysql:8.1.0-debian-11-r42 + imagePullPolicy: "IfNotPresent" + securityContext: + runAsUser: 1001 + env: + - name: BITNAMI_DEBUG + value: "true" + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: helm-mysql + key: mysql-root-password + - name: MYSQL_DATABASE + value: "cmii" + ports: + - name: mysql + containerPort: 3306 + livenessProbe: + failureThreshold: 5 + initialDelaySeconds: 120 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + readinessProbe: + failureThreshold: 5 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f 
"${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + resources: + limits: { } + requests: { } + volumeMounts: + - name: mysql-data + mountPath: /bitnami/mysql + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + - name: config + mountPath: /opt/bitnami/mysql/conf/my.cnf + subPath: my.cnf + volumes: + - name: config + configMap: + name: helm-mysql + - name: custom-init-scripts + configMap: + name: helm-mysql-init-scripts + - name: mysql-data + hostPath: + path: /var/lib/docker/mysql-pv diff --git a/agent-operator/deploy/z_file/k8s-nacos.yaml b/agent-operator/deploy/z_file/k8s-nacos.yaml new file mode 100644 index 0000000..6b3c500 --- /dev/null +++ b/agent-operator/deploy/z_file/k8s-nacos.yaml @@ -0,0 +1,126 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-nacos-cm + namespace: zjjt + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 5.5.0 +data: + mysql.db.name: "cmii_nacos_config" + mysql.db.host: "helm-mysql" + mysql.port: "3306" + mysql.user: "k8s_admin" + mysql.password: "fP#UaH6qQ3)8" +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-nacos + namespace: zjjt + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 5.5.0 +spec: + type: NodePort + selector: + cmii.app: helm-nacos + cmii.type: middleware + ports: + - port: 8848 + name: server + targetPort: 8848 + nodePort: 38989 + - port: 9848 + name: server12 + targetPort: 9848 + nodePort: 38912 + - port: 9849 + name: server23 + targetPort: 9849 + nodePort: 38923 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-nacos + namespace: zjjt + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: 5.5.0 +spec: + serviceName: helm-nacos + replicas: 1 + selector: + matchLabels: + cmii.app: helm-nacos + cmii.type: middleware + template: + metadata: + labels: + cmii.app: helm-nacos + cmii.type: middleware + octopus.control: nacos-wdd + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/version: 5.5.0 + annotations: + pod.alpha.kubernetes.io/initialized: "true" + spec: + affinity: { } + containers: + - name: nacos-server + image: 10.100.2.121:8033/cmii/nacos-server:v2.1.2 + ports: + - containerPort: 8848 + name: dashboard + env: + - name: NACOS_AUTH_ENABLE + value: "false" + - name: NACOS_REPLICAS + value: "1" + - name: MYSQL_SERVICE_DB_NAME + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.db.name + - name: MYSQL_SERVICE_PORT + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.port + - name: MYSQL_SERVICE_USER + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.user + - name: MYSQL_SERVICE_PASSWORD + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.password + - name: MYSQL_SERVICE_HOST + valueFrom: + configMapKeyRef: + name: helm-nacos-cm + key: mysql.db.host + - name: 
NACOS_SERVER_PORT + value: "8848" + - name: NACOS_APPLICATION_PORT + value: "8848" + - name: PREFER_HOST_MODE + value: "hostname" + - name: MODE + value: standalone + - name: SPRING_DATASOURCE_PLATFORM + value: mysql +---
diff --git a/agent-operator/deploy/z_file/k8s-nfs-test.yaml b/agent-operator/deploy/z_file/k8s-nfs-test.yaml new file mode 100644 index 0000000..0fff1cd --- /dev/null +++ b/agent-operator/deploy/z_file/k8s-nfs-test.yaml @@ -0,0 +1,36 @@ +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: test-claim + annotations: + volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # keep consistent with metadata.name in nfs-StorageClass.yaml +spec: + accessModes: + - ReadWriteOnce + storageClassName: nfs-prod-distribute + resources: + requests: + storage: 1Mi +--- +kind: Pod +apiVersion: v1 +metadata: + name: test-pod +spec: + containers: + - name: test-pod + image: 10.100.2.121:8033/cmii/busybox + command: + - "/bin/sh" + args: + - "-c" + - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" # create a SUCCESS marker file, then exit + volumeMounts: + - name: nfs-pvc + mountPath: "/mnt" + restartPolicy: "Never" + volumes: + - name: nfs-pvc + persistentVolumeClaim: + claimName: test-claim # keep consistent with the PVC name
diff --git a/agent-operator/deploy/z_file/k8s-nfs.yaml b/agent-operator/deploy/z_file/k8s-nfs.yaml new file mode 100644 index 0000000..8b050ca --- /dev/null +++ b/agent-operator/deploy/z_file/k8s-nfs.yaml @@ -0,0 +1,112 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system # set the namespace to match your environment; same for the manifests below +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: nfs-client-provisioner-runner +rules: + - apiGroups: [ "" ] + resources: [ "persistentvolumes" ] + verbs: [ "get", "list", "watch", "create", "delete" ] + - apiGroups: [ "" ] + resources: [ "persistentvolumeclaims" ] + verbs: [ "get", "list", "watch", "update" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "storageclasses" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "create", "update", "patch" ] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: run-nfs-client-provisioner +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system +roleRef: + kind: ClusterRole + # name: nfs-client-provisioner-runner + name: cluster-admin + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system +rules: + - apiGroups: [ "" ] + resources: [ "endpoints" ] + verbs: [ "get", "list", "watch", "create", "update", "patch" ] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system +roleRef: + kind: Role + name: leader-locking-nfs-client-provisioner + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: nfs-prod-distribute +provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME environment variable in the provisioner Deployment below +parameters: + archiveOnDelete: "false" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nfs-client-provisioner + labels: + app: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: kube-system # keep consistent with the namespace in the RBAC manifests above +spec: + replicas: 1 + selector: + matchLabels: + app: nfs-client-provisioner + strategy: + type: Recreate + template: + metadata: + labels: + app: nfs-client-provisioner + spec: + serviceAccountName: nfs-client-provisioner + containers: + - name: nfs-client-provisioner + image: 10.100.2.121:8033/cmii/nfs-subdir-external-provisioner:v4.0.2 + volumeMounts: + - name: nfs-client-root + mountPath: /persistentvolumes + env: + - name: PROVISIONER_NAME + value: cmlc-nfs-storage + - name: NFS_SERVER + value: 192.168.1.1 + - name: NFS_PATH + value: /var/lib/docker/nfs_data + volumes: + - name: nfs-client-root + nfs: + server: 192.168.1.1 + path: /var/lib/docker/nfs_data
diff --git a/agent-operator/deploy/z_file/k8s-pvc.yaml b/agent-operator/deploy/z_file/k8s-pvc.yaml new file mode 100644 index 0000000..54b5f33 --- /dev/null +++ b/agent-operator/deploy/z_file/k8s-pvc.yaml @@ -0,0 +1,76 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: nfs-backend-log-pvc + namespace: zjjt + labels: + cmii.type: middleware-base + cmii.app: nfs-backend-log-pvc + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.5.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 100Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-emqxs + namespace: zjjt + labels: + cmii.type: middleware-base + cmii.app: helm-emqxs + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.5.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 20Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-mongo + namespace: zjjt + labels: + cmii.type: middleware-base + cmii.app: helm-mongo + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.5.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 30Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: helm-rabbitmq + namespace: zjjt + labels: + cmii.type: middleware-base + cmii.app: helm-rabbitmq + helm.sh/chart: all-persistence-volume-claims-1.1.0 + app.kubernetes.io/version: 5.5.0 +spec: + storageClassName: nfs-prod-distribute + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 20Gi
diff --git a/agent-operator/deploy/z_file/k8s-rabbitmq.yaml b/agent-operator/deploy/z_file/k8s-rabbitmq.yaml new file mode 100644 index 0000000..216cc60 --- /dev/null +++ b/agent-operator/deploy/z_file/k8s-rabbitmq.yaml @@ -0,0 +1,654 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-rabbitmq + namespace: zjjt + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: rabbitmq +automountServiceAccountToken: true +secrets: + - name: helm-rabbitmq +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-rabbitmq + namespace: zjjt + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: rabbitmq +type: Opaque +data: + rabbitmq-password: "blljUk45MXIuX2hq" + rabbitmq-erlang-cookie:
"emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU=" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-rabbitmq-config + namespace: zjjt + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: rabbitmq +data: + rabbitmq.conf: |- + ## Username and password + ## + default_user = admin + default_pass = nYcRN91r._hj + ## Clustering + ## + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.cluster.local + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + # queue master locator + queue_master_locator = min-masters + # enable guest user + loopback_users.guest = false + #default_vhost = default-vhost + #disk_free_limit.absolute = 50MB + #load_definitions = /app/load_definition.json +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-rabbitmq-endpoint-reader + namespace: zjjt + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: rabbitmq +rules: + - apiGroups: [ "" ] + resources: [ "endpoints" ] + verbs: [ "get" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "create" ] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: helm-rabbitmq-endpoint-reader + namespace: zjjt + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: rabbitmq +subjects: + - kind: ServiceAccount + name: helm-rabbitmq +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: helm-rabbitmq-endpoint-reader +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-rabbitmq-headless + namespace: zjjt + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: rabbitmq +spec: + clusterIP: None + ports: + - name: epmd + port: 4369 + targetPort: epmd + - name: amqp + port: 5672 + targetPort: amqp + - name: dist + port: 25672 + targetPort: dist + - name: dashboard + port: 15672 + targetPort: stats + selector: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: zjjt + publishNotReadyAddresses: true +--- +apiVersion: v1 +kind: Service +metadata: + name: helm-rabbitmq + namespace: zjjt + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: rabbitmq +spec: + type: NodePort + ports: + - name: amqp + port: 5672 + targetPort: amqp + nodePort: 35672 + - name: dashboard + port: 15672 + targetPort: dashboard + nodePort: 35675 + selector: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: zjjt +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-rabbitmq + namespace: zjjt + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: rabbitmq +spec: + serviceName: helm-rabbitmq-headless + podManagementPolicy: OrderedReady + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: helm-rabbitmq + app.kubernetes.io/release: zjjt + template: + metadata: + labels: + app.kubernetes.io/name: helm-rabbitmq + helm.sh/chart: rabbitmq-8.26.1 + 
app.kubernetes.io/release: zjjt + app.kubernetes.io/managed-by: rabbitmq + annotations: + checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1 + checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f + spec: + + serviceAccountName: helm-rabbitmq + affinity: { } + securityContext: + fsGroup: 5001 + runAsUser: 5001 + terminationGracePeriodSeconds: 120 + initContainers: + - name: volume-permissions + image: 10.100.2.121:8033/cmii/bitnami-shell:10-debian-10-r140 + imagePullPolicy: "Always" + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "/bitnami/rabbitmq/mnesia" + chown -R "5001:5001" "/bitnami/rabbitmq/mnesia" + securityContext: + runAsUser: 0 + resources: + limits: { } + requests: { } + volumeMounts: + - name: data + mountPath: /bitnami/rabbitmq/mnesia + containers: + - name: rabbitmq + image: 10.100.2.121:8033/cmii/rabbitmq:3.9.12-debian-10-r3 + imagePullPolicy: "Always" + env: + - name: BITNAMI_DEBUG + value: "false" + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: "helm-rabbitmq-headless" + - name: K8S_ADDRESS_TYPE + value: hostname + - name: RABBITMQ_FORCE_BOOT + value: "no" + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local" + - name: RABBITMQ_MNESIA_DIR + value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: "no" + - name: RABBITMQ_LOGS + value: "-" + - name: RABBITMQ_ULIMIT_NOFILES + value: "65536" + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: helm-rabbitmq + key: rabbitmq-erlang-cookie + - name: RABBITMQ_LOAD_DEFINITIONS + value: "no" + - name: RABBITMQ_SECURE_PASSWORD + value: "yes" + - name: RABBITMQ_USERNAME + value: "admin" + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: helm-rabbitmq + key: rabbitmq-password + - name: RABBITMQ_PLUGINS + value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap" + ports: + - name: amqp + containerPort: 5672 + - name: dist + containerPort: 25672 + - name: dashboard + containerPort: 15672 + - name: epmd + containerPort: 4369 + livenessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q ping + initialDelaySeconds: 120 + periodSeconds: 30 + timeoutSeconds: 20 + successThreshold: 1 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + initialDelaySeconds: 10 + periodSeconds: 30 + timeoutSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false" + else + rabbitmqctl stop_app + fi + resources: + limits: { } + requests: { } + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: data + mountPath: /bitnami/rabbitmq/mnesia + volumes: + - name: configuration + configMap: + name: helm-rabbitmq-config + items: + - key: rabbitmq.conf + path: rabbitmq.conf 
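+ # NOTE: the data volume below is backed by the helm-rabbitmq PersistentVolumeClaim declared in k8s-pvc.yaml (storageClassName: nfs-prod-distribute)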
+ - name: data + persistentVolumeClaim: + claimName: helm-rabbitmq
diff --git a/agent-operator/deploy/z_file/k8s-redis.yaml b/agent-operator/deploy/z_file/k8s-redis.yaml new file mode 100644 index 0000000..24c65a3 --- /dev/null +++ b/agent-operator/deploy/z_file/k8s-redis.yaml @@ -0,0 +1,584 @@ +--- +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: helm-redis + namespace: SUPREME + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: SUPREME + app.kubernetes.io/managed-by: octopus +--- +apiVersion: v1 +kind: Secret +metadata: + name: helm-redis + namespace: SUPREME + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: SUPREME + app.kubernetes.io/managed-by: octopus +type: Opaque +data: + redis-password: "TWNhY2hlQDQ1MjI=" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-configuration + namespace: SUPREME + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: SUPREME + app.kubernetes.io/managed-by: octopus +data: + redis.conf: |- + # User-supplied common configuration: + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled.
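+ # (an empty save directive removes all RDB snapshot points, so durability relies on the AOF alone)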
+ save "" + # End of common configuration + master.conf: |- + dir /data + # User-supplied master configuration: + rename-command FLUSHDB "" + rename-command FLUSHALL "" + # End of master configuration + replica.conf: |- + dir /data + slave-read-only yes + # User-supplied replica configuration: + rename-command FLUSHDB "" + rename-command FLUSHALL "" + # End of replica configuration +--- +# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-health + namespace: SUPREME + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: SUPREME + app.kubernetes.io/managed-by: octopus +data: + ping_readiness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status +--- +# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-redis-scripts + namespace: SUPREME + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: SUPREME + app.kubernetes.io/managed-by: octopus +data: + start-master.sh: | + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + exec redis-server "${ARGS[@]}" + start-replica.sh: | + #!/bin/bash + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo 26379 + ;; + "REDIS") + echo 6379 + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + echo "${hostname}.${HEADLESS_SERVICE}" + } + + REDISPORT=$(get_port "$HOSTNAME" "REDIS") + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + + echo "" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + exec redis-server "${ARGS[@]}" +--- +# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: helm-redis-headless + namespace: SUPREME + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: SUPREME + app.kubernetes.io/managed-by: octopus +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: SUPREME +--- +# Source: outside-deploy/charts/redis-db/templates/master/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: helm-redis-master + namespace: SUPREME + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: SUPREME + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +spec: + type: ClusterIP + + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + nodePort: null + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: SUPREME + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +--- +# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: helm-redis-replicas + namespace: SUPREME + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: SUPREME + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica +spec: + type: ClusterIP + ports: + - name: tcp-redis + port: 6379 + targetPort: 
redis + nodePort: null + selector: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: SUPREME + app.kubernetes.io/component: replica +--- +# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-redis-master + namespace: SUPREME + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: SUPREME + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: SUPREME + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master + serviceName: helm-redis-headless + updateStrategy: + rollingUpdate: { } + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: SUPREME + app.kubernetes.io/managed-by: octopus + cmii.type: middleware + cmii.app: redis + app.kubernetes.io/component: master + annotations: + checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 + checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 + checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 + checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d + spec: + affinity: { } + securityContext: + fsGroup: 1001 + serviceAccountName: helm-redis + + terminationGracePeriodSeconds: 30 + containers: + - name: redis + image: 10.100.2.121:8033/cmii/redis:6.2.6-debian-10-r0 + imagePullPolicy: "Always" + securityContext: + runAsUser: 1001 + command: + - /bin/bash + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: REDIS_REPLICATION_MODE + value: master + - name: ALLOW_EMPTY_PASSWORD + value: "no" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_TLS_ENABLED + value: "no" + - name: REDIS_PORT + value: "6379" + ports: + - name: redis + containerPort: 6379 + livenessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + # One second longer than command timeout should prevent generation of zombie processes. 
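+ # (the exec command below runs ping_liveness_local.sh with a 5-second timeout argument)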
+ timeoutSeconds: 6 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh 5 + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 2 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh 1 + resources: + limits: + cpu: "2" + memory: 8Gi + requests: + cpu: "100m" + memory: 1Gi + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + - name: redis-data + mountPath: /data + subPath: + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + - name: tmp + mountPath: /tmp + volumes: + - name: start-scripts + configMap: + name: helm-redis-scripts + defaultMode: 0755 + - name: health + configMap: + name: helm-redis-health + defaultMode: 0755 + - name: config + configMap: + name: helm-redis-configuration + - name: redis-tmp-conf + emptyDir: { } + - name: tmp + emptyDir: { } + - name: redis-data + emptyDir: { } +--- +# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: helm-redis-replicas + namespace: SUPREME + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: SUPREME + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis-db + app.kubernetes.io/release: SUPREME + app.kubernetes.io/component: replica + serviceName: helm-redis-headless + updateStrategy: + rollingUpdate: { } + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: redis-db + octopus.control: redis-db-wdd + app.kubernetes.io/release: SUPREME + app.kubernetes.io/managed-by: octopus + app.kubernetes.io/component: replica + annotations: + checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0 + checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623 + checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98 + checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d + spec: + + securityContext: + fsGroup: 1001 + serviceAccountName: helm-redis + + terminationGracePeriodSeconds: 30 + containers: + - name: redis + image: 10.100.2.121:8033/cmii/redis:6.2.6-debian-10-r0 + imagePullPolicy: "Always" + securityContext: + runAsUser: 1001 + command: + - /bin/bash + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-replica.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: helm-redis-master-0.helm-redis-headless.SUPREME.svc.cluster.local + - name: REDIS_MASTER_PORT_NUMBER + value: "6379" + - name: ALLOW_EMPTY_PASSWORD + value: "no" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: helm-redis + key: redis-password + - name: REDIS_TLS_ENABLED + value: "no" + - name: REDIS_PORT + value: "6379" + ports: + - name: redis + containerPort: 6379 + livenessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 6 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh 5 + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + 
timeoutSeconds: 2 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh 1 + resources: + limits: + cpu: "2" + memory: 8Gi + requests: + cpu: "100m" + memory: 1Gi + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + - name: redis-data + mountPath: /data + subPath: + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + volumes: + - name: start-scripts + configMap: + name: helm-redis-scripts + defaultMode: 0755 + - name: health + configMap: + name: helm-redis-health + defaultMode: 0755 + - name: config + configMap: + name: helm-redis-configuration + - name: redis-tmp-conf + emptyDir: { } + - name: redis-data + emptyDir: { } + diff --git a/agent-operator/deploy/z_file/k8s-srs.yaml b/agent-operator/deploy/z_file/k8s-srs.yaml new file mode 100644 index 0000000..66fc976 --- /dev/null +++ b/agent-operator/deploy/z_file/k8s-srs.yaml @@ -0,0 +1,499 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: helm-live-srs-cm + namespace: zjjt + labels: + cmii.app: live-srs + cmii.type: live + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + helm.sh/chart: cmlc-live-srs-rtc-2.0.0 +data: + srs.rtc.conf: |- + listen 30935; + max_connections 4096; + srs_log_tank console; + srs_log_level info; + srs_log_file /home/srs.log; + daemon off; + http_api { + enabled on; + listen 1985; + crossdomain on; + } + stats { + network 0; + } + http_server { + enabled on; + listen 8080; + dir /home/hls; + } + srt_server { + enabled on; + listen 30556; + maxbw 1000000000; + connect_timeout 4000; + peerlatency 600; + recvlatency 600; + } + rtc_server { + enabled on; + listen 30090; + candidate $CANDIDATE; + } + vhost __defaultVhost__ { + http_hooks { + enabled on; + on_publish http://helm-live-op-svc-v2:8080/hooks/on_push; + } + http_remux { + enabled on; + } + rtc { + enabled on; + rtmp_to_rtc on; + rtc_to_rtmp on; + keep_bframe off; + } + tcp_nodelay on; + min_latency on; + play { + gop_cache off; + mw_latency 100; + mw_msgs 10; + } + publish { + firstpkt_timeout 8000; + normal_timeout 4000; + mr on; + } + dvr { + enabled off; + dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4; + dvr_plan session; + } + hls { + enabled on; + hls_path /home/hls; + hls_fragment 10; + hls_window 60; + hls_m3u8_file [app]/[stream].m3u8; + hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts; + hls_cleanup on; + hls_entry_prefix http://10.100.2.121:8888; + } + } +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-srs-svc-exporter + namespace: zjjt + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - name: rtmp + protocol: TCP + port: 30935 + targetPort: 30935 + nodePort: 30935 + - name: rtc + protocol: UDP + port: 30090 + targetPort: 30090 + nodePort: 30090 + - name: rtc-tcp + protocol: TCP + port: 30090 + targetPort: 30090 + nodePort: 30090 + - name: srt + protocol: UDP + port: 30556 + targetPort: 30556 + nodePort: 30556 + - name: api + protocol: TCP + port: 1985 + targetPort: 1985 + nodePort: 30557 + selector: + srs-role: rtc + type: NodePort + sessionAffinity: None + externalTrafficPolicy: Cluster + +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-srs-svc + namespace: zjjt + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 
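+ # port 1985 below exposes the SRS HTTP API (the http_api block in srs.rtc.conf); the live operator reaches it as http://helm-live-srs-svc:1985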
+ - name: api + protocol: TCP + port: 1985 + targetPort: 1985 + selector: + srs-role: rtc + type: ClusterIP + sessionAffinity: None + +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-srsrtc-svc + namespace: zjjt + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - name: rtmp + protocol: TCP + port: 30935 + targetPort: 30935 + selector: + srs-role: rtc + type: ClusterIP + sessionAffinity: None + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: helm-live-srs-rtc + namespace: zjjt + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + cmii.app: live-srs + cmii.type: live + helm.sh/chart: cmlc-live-srs-rtc-2.0.0 + srs-role: rtc +spec: + replicas: 1 + selector: + matchLabels: + srs-role: rtc + template: + metadata: + creationTimestamp: null + labels: + srs-role: rtc + spec: + volumes: + - name: srs-conf-file + configMap: + name: helm-live-srs-cm + items: + - key: srs.rtc.conf + path: docker.conf + defaultMode: 420 + - name: srs-vol + emptyDir: + sizeLimit: 8Gi + containers: + - name: srs-rtc + image: 10.100.2.121:8033/cmii/srs:v5.0.195 + ports: + - name: srs-rtmp + containerPort: 30935 + protocol: TCP + - name: srs-api + containerPort: 1985 + protocol: TCP + - name: srs-flv + containerPort: 8080 + protocol: TCP + - name: srs-webrtc + containerPort: 30090 + protocol: UDP + - name: srs-webrtc-tcp + containerPort: 30090 + protocol: TCP + - name: srs-srt + containerPort: 30556 + protocol: UDP + env: + - name: CANDIDATE + value: 10.100.2.121 + resources: + limits: + cpu: 1200m + memory: 6Gi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: srs-conf-file + mountPath: /usr/local/srs/conf/docker.conf + subPath: docker.conf + - name: srs-vol + mountPath: /home/dvr + subPath: zjjt/helm-live/dvr + - name: srs-vol + mountPath: /home/hls + subPath: zjjt/helm-live/hls + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + - name: oss-adaptor + image: 10.100.2.121:8033/cmii/cmii-srs-oss-adaptor:2023-SA + env: + - name: OSS_ENDPOINT + value: 'http://10.100.2.116:9000' + - name: OSS_AK + value: cmii + - name: OSS_SK + value: 'B#923fC7mk' + - name: OSS_BUCKET + value: live-cluster-hls + - name: SRS_OP + value: 'http://helm-live-op-svc-v2:8080' + - name: MYSQL_ENDPOINT + value: 'helm-mysql:3306' + - name: MYSQL_USERNAME + value: k8s_admin + - name: MYSQL_PASSWORD + value: fP#UaH6qQ3)8 + - name: MYSQL_DATABASE + value: cmii_live_srs_op + - name: MYSQL_TABLE + value: live_segment + - name: LOG_LEVEL + value: info + - name: OSS_META + value: 'yes' + resources: + limits: + cpu: 1200m + memory: 4Gi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: srs-vol + mountPath: /cmii/share/hls + subPath: zjjt/helm-live/hls + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: { } + imagePullSecrets: + - name: harborsecret + affinity: { } + schedulerName: default-scheduler + serviceName: helm-live-srsrtc-svc + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: 0 + revisionHistoryLimit: 10 +--- +# live-srs section +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: helm-live-op-v2 + namespace: zjjt + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + cmii.app: live-engine + cmii.type: live + helm.sh/chart: 
cmlc-live-live-op-2.0.0 + live-role: op-v2 +spec: + replicas: 1 + selector: + matchLabels: + live-role: op-v2 + template: + metadata: + creationTimestamp: null + labels: + live-role: op-v2 + spec: + volumes: + - name: srs-conf-file + configMap: + name: helm-live-op-cm-v2 + items: + - key: live.op.conf + path: bootstrap.yaml + defaultMode: 420 + containers: + - name: helm-live-op-v2 + image: 10.100.2.121:8033/cmii/cmii-live-operator:5.2.0 + ports: + - name: operator + containerPort: 8080 + protocol: TCP + resources: + limits: + cpu: 4800m + memory: 4Gi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: srs-conf-file + mountPath: /cmii/bootstrap.yaml + subPath: bootstrap.yaml + livenessProbe: + httpGet: + path: /cmii/ping + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /cmii/ping + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 20 + successThreshold: 1 + failureThreshold: 3 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: { } + imagePullSecrets: + - name: harborsecret + affinity: { } + schedulerName: default-scheduler + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 25% + maxSurge: 25% + revisionHistoryLimit: 10 + progressDeadlineSeconds: 600 +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-op-svc-v2 + namespace: zjjt + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 + nodePort: 30333 + selector: + live-role: op-v2 + type: NodePort + sessionAffinity: None +--- +kind: Service +apiVersion: v1 +metadata: + name: helm-live-op-svc + namespace: zjjt + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus +spec: + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 + selector: + live-role: op + type: ClusterIP + sessionAffinity: None +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: helm-live-op-cm-v2 + namespace: zjjt + labels: + octopus.control: wdd + app.kubernetes.io/managed-by: octopus + cmii.app: live-engine + cmii.type: live +data: + live.op.conf: |- + server: + port: 8080 + spring: + main: + allow-bean-definition-overriding: true + allow-circular-references: true + application: + name: cmii-live-operator + platform: + info: + name: cmii-live-operator + description: cmii-live-operator + version: 5.5.0 + scanPackage: com.cmii.live.op + cloud: + nacos: + config: + username: developer + password: N@cos14Good + server-addr: helm-nacos:8848 + extension-configs: + - data-id: cmii-live-operator.yml + group: 5.5.0 + refresh: true + shared-configs: + - data-id: cmii-backend-system.yml + group: 5.5.0 + refresh: true + discovery: + enabled: false + + live: + engine: + type: srs + endpoint: 'http://helm-live-srs-svc:1985' + + proto: + rtmp: 'rtmp://10.100.2.121:30935' + rtsp: 'rtsp://10.100.2.121:30554' + srt: 'srt://10.100.2.121:30556' + flv: 'http://10.100.2.121:30500' + hls: 'http://10.100.2.121:30500' + rtc: 'webrtc://10.100.2.121:30557' + replay: 'https://10.100.2.121:30333' + minio: + endpoint: http://10.100.2.116:9000 + access-key: cmii + secret-key: B#923fC7mk + bucket: live-cluster-hls diff --git a/agent-operator/go.mod b/agent-operator/go.mod index d0407c8..df4fc01 100644 --- a/agent-operator/go.mod +++ 
b/agent-operator/go.mod @@ -5,6 +5,7 @@ go 1.22.1 require ( github.com/docker/docker v20.10.17+incompatible github.com/docker/go-units v0.4.0 + github.com/go-playground/validator/v10 v10.19.0 github.com/klauspost/pgzip v1.2.6 github.com/minio/minio-go v6.0.14+incompatible github.com/mittwald/goharbor-client/v5 v5.5.3 @@ -27,6 +28,7 @@ require ( github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -40,6 +42,8 @@ require ( github.com/go-openapi/strfmt v0.21.3 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/go-openapi/validate v0.22.1 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/goharbor/harbor/src v0.0.0-20230220075213-6015b3efa7d0 // indirect github.com/golang/protobuf v1.5.3 // indirect @@ -51,6 +55,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.13.6 // indirect + github.com/leodido/go-urn v1.4.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect diff --git a/agent-operator/go.sum b/agent-operator/go.sum index e0f18c3..fac40bd 100644 --- a/agent-operator/go.sum +++ b/agent-operator/go.sum @@ -26,6 +26,8 @@ github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -69,6 +71,14 @@ github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/ github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4= 
+github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= @@ -151,6 +161,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= diff --git a/agent-operator/main.go b/agent-operator/main.go index 6af27cb..0d1da3f 100644 --- a/agent-operator/main.go +++ b/agent-operator/main.go @@ -82,7 +82,7 @@ DLTUHelp func main() { - // C:\Users\wddsh\go\bin\gox.exe -osarch="linux/amd66" -output "build/operator_{{.OS}}_{{.Arch}}" + // C:\Users\wddsh\go\bin\gox.exe -osarch="linux/amd64" -output "build/operator_{{.OS}}_{{.Arch}}" //RealProjectRunner() @@ -130,9 +130,9 @@ func main() { fmt.Println("harborHostFullName: ", harborHostFullName) fmt.Println() - var downloadFromOss bool - if ossFileName != "" { - downloadFromOss = true + downloadFromOss := true + if ossFileName == "0" { + downloadFromOss = false } DownloadLoadTagPush(downloadFromOss, ossUrlPrefix, ossFileName, localGzipFolder, harborHostFullName) diff --git a/server/src/test/java/io/wdd/server/func/TestImageSyncScheduler.java b/server/src/test/java/io/wdd/server/func/TestImageSyncScheduler.java index deab721..ee7a6d1 100644 --- a/server/src/test/java/io/wdd/server/func/TestImageSyncScheduler.java +++ b/server/src/test/java/io/wdd/server/func/TestImageSyncScheduler.java @@ -44,7 +44,7 @@ public class TestImageSyncScheduler { ArrayList ImageFullNameList = new ArrayList<>(List.of( // "harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0", // "harbor.cdcyy.com.cn/cmii/cmii/srs:v5.0.195" - "harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.3.0-cqly-042302" + "harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:5.4.0-cqly-042503" )); Boolean downloadAndCompressOnly = false;