Merge branch 'refs/heads/local-ss'

# Conflicts:
#	agent-operator/deploy/OctopusDeploy.go
This commit is contained in: zeaslity
2024-07-17 11:01:36 +08:00
885 changed files with 143315 additions and 1046 deletions

0  .fastRequest/collections/Root/directory.json  Normal file → Executable file
0  .fastRequest/collections/Root/server/directory.json  Normal file → Executable file
0  .github/workflows/build-push-docker.yml vendored  Normal file → Executable file
0  .gitignore vendored  Normal file → Executable file
0  .run/agent-go bastion.run.xml  Normal file → Executable file
0  .run/agent-go main.run.xml  Normal file → Executable file
0  .run/go build agent-go.run.xml  Normal file → Executable file
0  README.md  Normal file → Executable file
0  agent-common/assert/MyAssert.go  Normal file → Executable file
0  agent-common/assert/assert.go  Normal file → Executable file
0  agent-common/assert/assert_test.go  Normal file → Executable file
0  agent-common/go.mod  Normal file → Executable file
0  agent-common/go.sum  Normal file → Executable file

60  agent-common/image/ImageNameConvert.go  Normal file → Executable file

@@ -105,27 +105,48 @@ func ImageFullNameToGzipFileName(imageFullName string) (gzipFileName string) {
}
 // ImageNameToTargetImageFullName converts an imageName into the full image name on the target host; the input may be either a short or a full name
-func ImageNameToTargetImageFullName(imageFullName, targetHostFullName string) string {
-	if strings.HasPrefix(imageFullName, CmiiHarborPrefix) {
-		imageFullName = strings.TrimPrefix(imageFullName, CmiiHarborPrefix)
-	} else if strings.HasPrefix(imageFullName, "docker.io") {
-		imageFullName = strings.TrimPrefix(imageFullName, "docker.io")
-	}
-	// rancher/123:v123
-	if strings.HasPrefix(imageFullName, "rancher") {
-		return targetHostFullName + "/" + imageFullName
-	}
-	// ossr/srs:v4.0.5
-	if strings.Contains(imageFullName, "/") {
-		imageFullName = strings.Split(imageFullName, "/")[1]
-	}
-	// srs:v4.0.5
-	// cmii-uav-platform:5.4.0
-	s := targetHostFullName + "/cmii/" + imageFullName
-	log.InfoF("ImageFullName: [%s] to TargetImageFullName: [%s]", imageFullName, s)
-	return s
+func ImageNameToTargetImageFullName(imageName, targetHarborHost string) string {
+	targetProject := "cmii"
+	if strings.HasPrefix(imageName, "rancher") {
+		// rancher/rancher:v2.5.7, the rancher-style name
+		targetProject = "rancher"
+	}
+	countOfSplit := strings.Count(imageName, "/")
+	split := strings.Split(imageName, "/")
+	targetImageName := imageName
+	switch countOfSplit {
+	case 0:
+		// nginx:latest
+		targetImageName = targetHarborHost + "/" + targetProject + "/" + imageName
+		break
+	default:
+		// 10.250.0.10:8033/cmii/cmii-uav-gateway:v1.0.0
+		targetImageName = targetHarborHost + "/" + targetProject + "/" + split[countOfSplit]
+		break
+	}
+	//if strings.HasPrefix(imageFullName, CmiiHarborPrefix) {
+	//	imageFullName = strings.TrimPrefix(imageFullName, CmiiHarborPrefix)
+	//} else if strings.HasPrefix(imageFullName, "docker.io") {
+	//	imageFullName = strings.TrimPrefix(imageFullName, "docker.io")
+	//}
+	//// rancher/123:v123
+	//if strings.HasPrefix(imageFullName, "rancher") {
+	//	return targetHostFullName + "/" + imageFullName
+	//}
+	//// ossr/srs:v4.0.5
+	//if strings.Contains(imageFullName, "/") {
+	//	imageFullName = strings.Split(imageFullName, "/")[1]
+	//}
+	//
+	//// srs:v4.0.5
+	//// cmii-uav-platform:5.4.0
+	//s := targetHostFullName + "/cmii/" + imageFullName
+	log.InfoF("ImageFullName: [%s] to TargetImageFullName: [%s]", imageName, targetImageName)
+	return targetImageName
}
func GzipFileNameToImageFullName(gzipFileName string) (imageFullName string) {
@@ -183,6 +204,9 @@ func GzipFolderPathToCmiiImageTagMaps(gzipFolderPath string) (frontendImageVersi
filepath.WalkDir(gzipFolderPath, func(path string, d os.DirEntry, err error) error {
//fmt.Println(path)
if d == nil {
return nil
}
name := d.Name()
if strings.HasSuffix(name, ".tar.gz") {
imageName, imageTag := GzipFileNameToImageNameAndTag(name)
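
To make the rewritten mapping concrete, here is a minimal standalone sketch of the same rule (the function name and sample values are illustrative, not part of the commit): every image is re-homed under the target Harbor's cmii project, rancher-prefixed images go under rancher, and only the last path segment of a long name survives.

package main

import (
	"fmt"
	"strings"
)

// mirrors the rewritten ImageNameToTargetImageFullName above
func toTargetImageFullName(imageName, targetHarborHost string) string {
	targetProject := "cmii"
	if strings.HasPrefix(imageName, "rancher") {
		targetProject = "rancher"
	}
	// keep only the last path segment (name:tag); drop any registry/project prefix
	split := strings.Split(imageName, "/")
	return targetHarborHost + "/" + targetProject + "/" + split[len(split)-1]
}

func main() {
	host := "10.250.0.110:8033"
	fmt.Println(toTargetImageFullName("nginx:latest", host))
	// 10.250.0.110:8033/cmii/nginx:latest
	fmt.Println(toTargetImageFullName("10.250.0.10:8033/cmii/cmii-uav-gateway:v1.0.0", host))
	// 10.250.0.110:8033/cmii/cmii-uav-gateway:v1.0.0
	fmt.Println(toTargetImageFullName("rancher/rancher:v2.5.7", host))
	// 10.250.0.110:8033/rancher/rancher:v2.5.7
}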

16  agent-common/image/ImageNameConvert_test.go  Normal file → Executable file

@@ -200,13 +200,27 @@ func TestGzipFileNameToImageFullName(t *testing.T) {
}
}
func TestImageGzipFileNameToImageFullName_1(t *testing.T) {
gzipFilePrefix := "/root/octopus_image/middle/"
filepath.WalkDir(gzipFilePrefix, func(path string, d os.DirEntry, err error) error {
//fmt.Println(path)
name := d.Name()
if strings.HasSuffix(name, ".tar.gz") {
fullName := GzipFileNameToImageFullName(name)
fmt.Println(fullName)
}
return nil
})
}
func TestImageGzipFileNameToImageFullName(t *testing.T) {
frontendMap := make(map[string]string)
backendMap := make(map[string]string)
srsMap := make(map[string]string)
gzipFilePrefix := "/root/octopus_image/xjyd/"
gzipFilePrefix := "/root/octopus_image/middle/"
filepath.WalkDir(gzipFilePrefix, func(path string, d os.DirEntry, err error) error {
//fmt.Println(path)
name := d.Name()

0  agent-common/logger/logger.go  Normal file → Executable file
0  agent-common/pusher/CmiiUpdateMessage.go  Normal file → Executable file
0  agent-common/pusher/CmiiUpdateMessage_test.go  Normal file → Executable file
0  agent-common/utils/DownloadUtils.go  Normal file → Executable file

30  agent-common/utils/FileUtils.go  Normal file → Executable file

@@ -214,7 +214,7 @@ func RemoveFolderComplete(folderName string) bool {
return true
}
-func ReadLineFromFile(fileFullPath string) (result []string) {
+func ReadAllContentFromFile(fileFullPath string) (result []string) {
f, err := os.Open(fileFullPath)
if err != nil {
@@ -238,3 +238,31 @@ func ReadLineFromFile(fileFullPath string) (result []string) {
return result
}
// FolderMoveFiles moves every file, excluding subdirectories, from the source folder to the destination folder
func FolderMoveFiles(srcDir, dstDir string) error {
	// read all entries in the source folder
	entries, err := os.ReadDir(srcDir)
	if err != nil {
		return fmt.Errorf("failed to read source folder: %w", err)
	}
	// iterate over all entries
	for _, entry := range entries {
		// skip subdirectories
		if entry.IsDir() {
			continue
		}
		// build the source and destination file paths
		srcPath := filepath.Join(srcDir, entry.Name())
		dstPath := filepath.Join(dstDir, entry.Name())
		// move the file
		if err := os.Rename(srcPath, dstPath); err != nil {
			return fmt.Errorf("failed to move file: %w", err)
		}
	}
	return nil
}
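
One caveat on FolderMoveFiles: os.Rename only succeeds when source and destination sit on the same filesystem; across mount points it fails with EXDEV. A self-contained copy-then-remove fallback sketch (moveFile is a hypothetical helper, not part of the commit):

package main

import (
	"io"
	"os"
)

// moveFile falls back to copy-and-delete when os.Rename fails,
// e.g. when src and dst live on different filesystems
func moveFile(src, dst string) error {
	if err := os.Rename(src, dst); err == nil {
		return nil
	}
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()
	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	if _, err = io.Copy(out, in); err != nil {
		out.Close()
		return err
	}
	if err = out.Close(); err != nil {
		return err
	}
	return os.Remove(src)
}

func main() {
	_ = os.WriteFile("demo.txt", []byte("hello"), 0o644)
	if err := moveFile("demo.txt", "demo-moved.txt"); err != nil {
		panic(err)
	}
}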


@@ -0,0 +1,8 @@
package utils
func MergeMap(originMap map[string]string, mergeInMap map[string]string) map[string]string {
for k, v := range mergeInMap {
originMap[k] = v
}
return originMap
}
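
The merge is last-writer-wins and mutates originMap in place. A small self-contained illustration (app names and versions invented):

package main

import "fmt"

// mergeMap mirrors the MergeMap above
func mergeMap(originMap, mergeInMap map[string]string) map[string]string {
	for k, v := range mergeInMap {
		originMap[k] = v
	}
	return originMap
}

func main() {
	base := map[string]string{"cmii-uav-gateway": "5.4.0", "cmii-uav-oauth": "5.4.0"}
	override := map[string]string{"cmii-uav-gateway": "5.5.0"}
	merged := mergeMap(base, override)
	fmt.Println(merged["cmii-uav-gateway"]) // 5.5.0 (mergeInMap wins on conflicts)
	fmt.Println(len(merged))                // 2
}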

0  agent-common/utils/MathUtils.go  Normal file → Executable file

12  agent-common/utils/PrintUtils.go  Normal file → Executable file

@@ -31,13 +31,21 @@ func BeautifulPrintToString(object interface{}) string {
return string(bytes)
}
func BeautifulPrintWithTitle(contend any, title string) {
fmt.Println()
fmt.Println(fmt.Sprintf("content tile is => %s", title))
bytes, _ := json.MarshalIndent(contend, "", " ")
fmt.Println(string(bytes))
fmt.Println("---------- end -----------")
}
func BeautifulPrintListWithTitle(contend []string, title string) {
fmt.Println()
fmt.Println(fmt.Sprintf("content tile is => %s", title))
for _, line := range contend {
bytes, _ := json.MarshalIndent(line, "", " ")
fmt.Println(string(bytes))
fmt.Println(line)
}
fmt.Println("---------- end -----------")
}

0  agent-common/utils/ReflectUtils.go  Normal file → Executable file

38  agent-common/utils/StringUtils.go  Normal file → Executable file

@@ -1,19 +1,36 @@
package utils
import (
"encoding/base64"
"fmt"
"math/rand"
"time"
)
-func GenerateRandomString(length int) string {
+func GenerateRandomString(length int, includeSpecialChar bool) string {
 	rand.Seed(time.Now().UnixNano())
-	chars := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
-	b := make([]byte, length)
-	for i := range b {
-		b[i] = chars[rand.Intn(len(chars))]
-	}
+	letters := "abcdefghijklmnopqrstuvwxyz1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+	specialChars := ".!@_#%^&*()-+"
+	b := make([]rune, length)
+	letterProbability := len(letters) * 100 / (len(letters) + len(specialChars))
+	if includeSpecialChar {
+		for i := range b {
+			if rand.Intn(100) < letterProbability { // probabilistically pick a letter
+				b[i] = rune(letters[rand.Intn(len(letters))])
+			} else { // probabilistically pick a special character
+				b[i] = rune(specialChars[rand.Intn(len(specialChars))])
+			}
+		}
+	} else {
+		for i := range b {
+			b[i] = rune(letters[rand.Intn(len(letters))])
+		}
+	}
return string(b)
}
@@ -49,3 +66,16 @@ func ByteSizeToString(size uint64) string {
return fmt.Sprintf("%.2f %s", value, unit)
}
func Base64Encode(content string) string {
return base64.StdEncoding.EncodeToString([]byte(content))
}
func Base64Decode(content string) string {
decodeString, err := base64.StdEncoding.DecodeString(content)
if err != nil {
log.ErrorF("Base64Decode error: %s", err.Error())
return ""
}
return string(decodeString)
}
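
As a sanity check on the letter/special-character split in GenerateRandomString: with 62 letters and 13 special characters, letterProbability = 62 * 100 / 75 = 82 by integer division, so roughly 82% of positions receive a letter. A one-file verification sketch:

package main

import "fmt"

func main() {
	letters := "abcdefghijklmnopqrstuvwxyz1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ"
	specialChars := ".!@_#%^&*()-+"
	// integer division, exactly as in GenerateRandomString above
	letterProbability := len(letters) * 100 / (len(letters) + len(specialChars))
	fmt.Println(len(letters), len(specialChars), letterProbability) // 62 13 82
}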


@@ -0,0 +1,37 @@
package utils
import (
"fmt"
"testing"
)
func TestBase64Decode(t *testing.T) {
decode := Base64Decode("blljUk45MXIuX2hq")
fmt.Println(decode)
}
func TestBase64Encode(t *testing.T) {
encode := Base64Encode("RB6Vfzs7XdC2")
fmt.Println(encode)
}
func TestGetRandomString(t *testing.T) {
for i := 0; i < 10; i++ {
randomString := GenerateRandomString(10, true)
fmt.Printf("id: %d randomString: %s\n", i, randomString)
}
}
func TestGetRandomMySQLPassword(t *testing.T) {
rootPassword := GenerateRandomString(12, false)
rootPasswordBase64 := Base64Encode(rootPassword)
k8sAdminPass := GenerateRandomString(12, true)
fmt.Println(rootPassword)
fmt.Println(rootPasswordBase64)
fmt.Println()
fmt.Println(k8sAdminPass)
}

0  agent-common/utils/TimeUtils.go  Normal file → Executable file

267  agent-deploy/OctopusDeploy.go  Executable file

@@ -0,0 +1,267 @@
package agent_deploy
import (
"os"
image2 "wdd.io/agent-common/image"
"wdd.io/agent-common/logger"
"wdd.io/agent-common/utils"
"wdd.io/agent-deploy/a_dashboard"
"wdd.io/agent-deploy/b_nfs"
"wdd.io/agent-deploy/c_middle"
"wdd.io/agent-deploy/d_app"
"wdd.io/agent-deploy/e_cmii"
"wdd.io/agent-deploy/z_dep"
)
var log = logger.Log
const (
dev = "uavcloud-dev"
devFlight = "uavcloud-devflight"
devOperation = "uavcloud-devoperation"
validation = "uavcloud-feature"
integration = "uavcloud-test"
uat = "uavcloud-uat"
demo = "uavcloud-demo"
uavms = "uavcloud-uavms"
)
func OctopusDeploy() {
// common environment
common := &z_dep.CommonEnvironmentConfig{
WebIP: "10.250.0.110",
WebPort: "8888",
HarborIPOrCustomImagePrefix: "10.250.0.110",
HarborPort: "8033",
Namespace: "bjtg",
TagVersion: "5.5.0",
TenantEnv: "",
MinioPublicIP: "10.250.0.110",
MinioInnerIP: "10.250.0.110",
NFSServerIP: "10.250.0.110",
}
// kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
a_dashboard.K8sDashboardDeploy(common)
b_nfs.NFSDeploy(common)
b_nfs.NFSTestDeploy(common)
// pvc
c_middle.PVCDeploy(common)
// middlewares
c_middle.MidMySQlDeploy(common)
c_middle.MidRedisDeploy(common)
c_middle.MidEmqxDeploy(common)
c_middle.MidMongoDeploy(common)
c_middle.MidRabbitMQDeploy(common)
c_middle.MidNacosDeploy(common)
configMapDeploy(common)
d_app.IngressDeploy(common)
gzipFilePrefix := "/root/octopus_image/xjyd/"
frontendImageVersionMap, backendImageVersionMap, _ := image2.GzipFolderPathToCmiiImageTagMaps(gzipFilePrefix)
backendDeploy(common, backendImageVersionMap)
frontendDeploy(common, frontendImageVersionMap)
d_app.SRSDeploy(common)
}
func CmiiAppDeploy() {
// common environment
common := &z_dep.CommonEnvironmentConfig{
WebIP: "36.133.201.78",
WebPort: "8888",
HarborIPOrCustomImagePrefix: "192.168.0.14",
HarborPort: "",
Namespace: "xjyd",
TagVersion: "5.5.0",
TenantEnv: "",
MinioPublicIP: "36.133.201.146",
MinioInnerIP: "192.168.0.21",
NFSServerIP: "192.168.0.14",
}
//frontendImageVersionMap, backendImageVersionMap, _ := image.FrontendBackendSrsImageMapFromCmiiImageMap(zjjt.CmiiImageMap)
gzipFilePrefix := "/root/octopus_image/xjyd/"
frontendImageVersionMap, backendImageVersionMap, _ := image2.GzipFolderPathToCmiiImageTagMaps(gzipFilePrefix)
backendDeploy(common, backendImageVersionMap)
frontendDeploy(common, frontendImageVersionMap)
//utils.BeautifulPrint(frontendImageVersionMap)
//configMapDeploy(common)
//c_app.IngressDeploy(common)
}
var IgnoreCmiiBackendAppName = map[string]string{
"cmii-uav-grid-datasource": "0",
"cmii-uav-grid-manage": "",
"cmii-uav-grid-engine": "",
"cmii-uav-kpi-monitor": "",
"cmii-uav-gis-server": "",
"cmii-app-release": "",
"cmii-uav-autowaypoint": "",
"cmii-uav-integration": "",
"cmii-uav-developer": "",
"cmii-open-gateway": "",
"cmii-uav-brain": "",
"cmii-uav-data-post-process": "",
"cmii-uav-multilink": "",
"cmii-uav-alarm": "",
"cmii-uav-tower": "",
"cmii-uav-clusters": "",
"cmii-uav-depotautoreturn": "",
}
func backendDeploy(common *z_dep.CommonEnvironmentConfig, backendImageVersionMap map[string]string) {
os.Remove(z_dep.BackendApplyFilePath)
for appName, tag := range backendImageVersionMap {
d_app.DefaultCmiiBackendConfig.AppName = appName
d_app.DefaultCmiiBackendConfig.ImageTag = tag
_, ok := IgnoreCmiiBackendAppName[appName]
if ok {
d_app.DefaultCmiiBackendConfig.Replicas = "0"
} else {
d_app.DefaultCmiiBackendConfig.Replicas = "1"
}
d_app.DefaultCmiiBackendConfig.BackendDeploy(common)
}
}
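
Note that the ignore list does not skip rendering: ignored backends still get a Deployment, just with Replicas "0", so they can be scaled up later without regenerating manifests. In sketch form (invented helper name):

package main

import "fmt"

var ignored = map[string]string{"cmii-uav-brain": ""}

func replicasFor(appName string) string {
	// presence in the map is what matters; the value is unused
	if _, ok := ignored[appName]; ok {
		return "0"
	}
	return "1"
}

func main() {
	fmt.Println(replicasFor("cmii-uav-brain"))   // 0
	fmt.Println(replicasFor("cmii-uav-gateway")) // 1
}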
func frontendDeploy(common *z_dep.CommonEnvironmentConfig, frontendImageVersionMap map[string]string) {
os.Remove(z_dep.FrontendApplyFilePath)
d_app.FrontendDefaultNginxDeploy(common)
for appName, tag := range frontendImageVersionMap {
d_app.DefaultCmiiFrontendConfig.AppName = appName
d_app.DefaultCmiiFrontendConfig.ImageTag = tag
d_app.DefaultCmiiFrontendConfig.Replicas = "1"
value, ok := d_app.FrontendShortNameMaps[appName]
if !ok {
log.ErrorF("FrontendShortNameMaps error ! not contains %s", appName)
continue
}
d_app.DefaultCmiiFrontendConfig.ShortName = value
d_app.DefaultCmiiFrontendConfig.FrontendDeploy(common)
}
}
func configMapDeploy(common *z_dep.CommonEnvironmentConfig) {
os.Remove(z_dep.ConfigMapApplyFilePath)
for frontendName, shortName := range d_app.FrontendShortNameMaps {
d_app.DefaultCmiiFrontendConfig.AppName = frontendName
d_app.DefaultCmiiFrontendConfig.ShortName = shortName
value, ok := d_app.FrontendClientIdMaps[frontendName]
if !ok {
log.ErrorF("FrontendClientIdMaps error ! not contains %s", frontendName)
continue
}
d_app.DefaultCmiiFrontendConfig.ClientId = value
d_app.DefaultCmiiFrontendConfig.ConfigMapDeploy(common)
}
}
// CmiiEnvironmentDeploy deploys every component of a complete CMII environment
func CmiiEnvironmentDeploy(isCompleteDeploy bool, commonEnv *z_dep.CommonEnvironmentConfig, backendImageVersionMap, frontendImageVersionMap map[string]string) {
// clear old apply file
_ = os.Mkdir(commonEnv.ApplyFilePrefix, os.ModePerm)
oldApplyFileFolder := commonEnv.ApplyFilePrefix + "old"
utils.RemoveFolderComplete(oldApplyFileFolder)
_ = os.Mkdir(oldApplyFileFolder, os.ModePerm)
// move all apply file to old folder
_ = utils.FolderMoveFiles(commonEnv.ApplyFilePrefix, oldApplyFileFolder)
// get cmii env config from namespace
cmiiEnvConfig := getCmiiEnvConfigurationFromNamespace(commonEnv.Namespace)
// generate new apply files for the specific environment
if isCompleteDeploy {
// pvc
c_middle.PVCDeploy(commonEnv)
// middlewares
cmiiEnvConfig.MySQlConfig.MidMySQlDeploy(commonEnv)
cmiiEnvConfig.RedisConfig.MidRedisDeploy(commonEnv)
cmiiEnvConfig.EmqxConfig.MidEmqxDeploy(commonEnv)
cmiiEnvConfig.MongoConfig.MidMongoDeploy(commonEnv)
cmiiEnvConfig.RabbitMQConfig.MidRabbitMQDeploy(commonEnv)
cmiiEnvConfig.NacosConfig.MidNacosDeploy(commonEnv)
configMapDeploy(commonEnv)
d_app.DefaultIngressConfig.IngressDeploy(commonEnv)
}
// frontend
frontendDeploy(commonEnv, frontendImageVersionMap)
// backend
backendDeploy(commonEnv, backendImageVersionMap)
// srs
cmiiEnvConfig.CmiiSrsConfig.SRSDeploy(commonEnv)
}
func CmiiNewAppDeploy(commonEnv *z_dep.CommonEnvironmentConfig, backendImageVersionMap, frontendImageVersionMap map[string]string) {
// get cmii env config from namespace
//cmiiEnvConfig := getCmiiEnvConfigurationFromNamespace(commonEnv.Namespace)
// frontend
configMapDeploy(commonEnv)
d_app.DefaultIngressConfig.IngressDeploy(commonEnv)
// frontend
frontendDeploy(commonEnv, frontendImageVersionMap)
// backend
backendDeploy(commonEnv, backendImageVersionMap)
}
func getCmiiEnvConfigurationFromNamespace(namespace string) *e_cmii.CmiiEnvConfig {
switch namespace {
case dev:
return e_cmii.CmiiDevConfig
case devFlight:
return e_cmii.CmiiDevFlightConfig
case devOperation:
return e_cmii.CmiiDevOperationConfig
case integration:
return e_cmii.CmiiIntegrationConfig
case uat:
return e_cmii.CmiiUatConfig
case validation:
return e_cmii.CmiiValidationConfig
case uavms:
return e_cmii.CmiiDemoConfig
default:
return e_cmii.CmiiOutSideConfig
}
}
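
The namespace switch above is a plain lookup with a fallback default. A reduced standalone sketch of the same dispatch shape (the config type and values are invented for illustration):

package main

import "fmt"

// standalone sketch of the dispatch in getCmiiEnvConfigurationFromNamespace;
// the environment config is reduced to a name here
type envConfig struct{ Name string }

var (
	devCfg     = &envConfig{"uavcloud-dev"}
	uatCfg     = &envConfig{"uavcloud-uat"}
	outsideCfg = &envConfig{"outside"}
)

func configForNamespace(ns string) *envConfig {
	switch ns {
	case "uavcloud-dev":
		return devCfg
	case "uavcloud-uat":
		return uatCfg
	default:
		// any unknown namespace falls back to the outside config
		return outsideCfg
	}
}

func main() {
	fmt.Println(configForNamespace("uavcloud-dev").Name) // uavcloud-dev
	fmt.Println(configForNamespace("bjtg").Name)         // outside
}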


@@ -1,4 +1,4 @@
package deploy
package agent_deploy
import "testing"


@@ -0,0 +1,14 @@
package a_dashboard
import (
"wdd.io/agent-common/logger"
"wdd.io/agent-deploy/z_dep"
)
var (
log = logger.Log
)
func K8sDashboardDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
return commonEnv.ParseCommonEnvToApplyFile(CmiiK8sDashboardTemplate, z_dep.K8sDashboardApplyFilePath)
}


@@ -177,9 +177,15 @@ spec:
labels:
k8s-app: kubernetes-dashboard
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: kubernetes-dashboard
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/dashboard:v2.0.1
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/dashboard:v2.0.1
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}dashboard:v2.0.1
{{- end }}
ports:
- containerPort: 8443
protocol: TCP
@@ -260,7 +266,11 @@ spec:
spec:
containers:
- name: dashboard-metrics-scraper
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/metrics-scraper:v1.0.4
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/metrics-scraper:v1.0.4
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}metrics-scraper:v1.0.4
{{- end }}
ports:
- containerPort: 8000
protocol: TCP
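
This if/else on .HarborPort recurs in every template touched by this commit, so it is worth seeing how it renders with Go's text/template. A minimal sketch with invented sample values; note that when HarborPort is empty, HarborIPOrCustomImagePrefix is concatenated directly against the image name, so it must carry its own trailing slash:

package main

import (
	"os"
	"text/template"
)

const imageLine = `{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/dashboard:v2.0.1
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}dashboard:v2.0.1
{{- end }}
`

type env struct {
	HarborIPOrCustomImagePrefix string
	HarborPort                  string
}

func main() {
	t := template.Must(template.New("img").Parse(imageLine))
	// with a port: a private Harbor, project path included
	_ = t.Execute(os.Stdout, env{"10.250.0.110", "8033"})
	// image: 10.250.0.110:8033/cmii/dashboard:v2.0.1
	// without a port: the value acts as a raw image prefix with its own trailing slash
	_ = t.Execute(os.Stdout, env{"registry.example.com/cmii/", ""})
	// image: registry.example.com/cmii/dashboard:v2.0.1
}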

22  agent-deploy/b_nfs/DeployNFS.go  Executable file

@@ -0,0 +1,22 @@
package b_nfs
import (
"wdd.io/agent-common/logger"
"wdd.io/agent-deploy/z_dep"
)
var (
log = logger.Log
)
type NfsDeployConfig struct {
NfsLocalPath string
}
func NFSDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
return commonEnv.ParseCommonEnvToApplyFile(CmiiNfsTemplate, z_dep.NfsApplyFilePath)
}
func NFSTestDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
return commonEnv.ParseCommonEnvToApplyFile(CmiiNFSTestTemplate, z_dep.NfsTestApplyFilePath)
}


@@ -1,4 +1,4 @@
package a_nfs
package b_nfs
const CmiiNfsTemplate = `
apiVersion: v1
@@ -82,6 +82,8 @@ metadata:
# replace with namespace where provisioner is deployed
namespace: kube-system # keep this consistent with the namespace in the RBAC file
spec:
imagePullSecrets:
- name: harborsecret
replicas: 1
selector:
matchLabels:
@@ -96,7 +98,11 @@ spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/nfs-subdir-external-provisioner:v4.0.2
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/nfs-subdir-external-provisioner:v4.0.2
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}nfs-subdir-external-provisioner:v4.0.2
{{- end }}
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes


@@ -1,4 +1,4 @@
package a_nfs
package b_nfs
const CmiiNFSTestTemplate = `
kind: PersistentVolumeClaim
@@ -20,9 +20,15 @@ apiVersion: v1
metadata:
name: test-pod
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: test-pod
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/busybox
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/busybox:latest
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}busybox:latest
{{- end }}
command:
- "/bin/sh"
args:


@@ -1,4 +1,4 @@
package b_middle
package c_middle
const CmiiEmqxTemplate = `
apiVersion: v1
@@ -20,14 +20,14 @@ metadata:
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
data:
EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc.cluster.local:443
EMQX_NAME: helm-emqxs
EMQX_CLUSTER__DISCOVERY: k8s
EMQX_CLUSTER__K8S__APP_NAME: helm-emqxs
EMQX_CLUSTER__K8S__SERVICE_NAME: helm-emqxs-headless
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
EMQX_CLUSTER__K8S__namespace: {{ .Namespace }}
EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local
EMQX_CLUSTER__K8S__namespace: "{{ .Namespace }}"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
@@ -44,10 +44,18 @@ metadata:
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
data:
emqx_auth_username.conf: |-
auth.user.1.username = cmlc
auth.user.1.password = odD8#Ve7.B
auth.user.password_hash = sha256
emqx_auth_mnesia.conf: |-
auth.mnesia.password_hash = sha256
# clientid auth entries
# auth.client.1.clientid = admin
# auth.client.1.password = 4YPk*DS%+5
## username auth entries
auth.user.1.username = admin
auth.user.1.password = {{ .EmqxPassword }}
auth.user.2.username = cmlc
auth.user.2.password = {{ .EmqxPassword }}
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
@@ -57,7 +65,8 @@ data:
{allow, all}.
loaded_plugins: |-
{emqx_auth_username,true}.
{emqx_auth_mnesia,true}.
{emqx_auth_mnesia,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
@@ -99,10 +108,16 @@ spec:
app.kubernetes.io/version: {{ .TagVersion }}
spec:
affinity: {}
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/emqx:5.5.1
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/emqx:4.4.9
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}emqx:4.4.9
{{- end }}
imagePullPolicy: Always
ports:
- name: mqtt
@@ -128,8 +143,8 @@ spec:
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf"
subPath: emqx_auth_username.conf
mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf"
subPath: emqx_auth_mnesia.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
@@ -147,8 +162,8 @@ spec:
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_username.conf
path: emqx_auth_username.conf
- key: emqx_auth_mnesia.conf
path: emqx_auth_mnesia.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
@@ -205,15 +220,15 @@ spec:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
nodePort: {{ .EmqxNodePort }}
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
nodePort: {{ .EmqxDashboardNodePort }}
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
nodePort: {{ .EmqxWebSocketNodePort }}
---
apiVersion: v1
kind: Service


@@ -1,4 +1,4 @@
package b_middle
package c_middle
const CmiiMongoTemplate = `
apiVersion: v1
@@ -13,7 +13,7 @@ metadata:
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
spec:
type: ClusterIP
type: NodePort
selector:
cmii.app: helm-mongo
cmii.type: middleware
@@ -21,6 +21,7 @@ spec:
- port: 27017
name: server-27017
targetPort: 27017
nodePort: {{ .MongoNodePort }}
---
apiVersion: apps/v1
kind: StatefulSet
@@ -51,10 +52,16 @@ spec:
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: helm-mongo
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/mongo:5.0
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/mongo:5.0
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}mongo:5.0
{{- end }}
resources: {}
ports:
- containerPort: 27017
@@ -64,7 +71,7 @@ spec:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
value: {{ .MongoPassword }}
volumeMounts:
- name: mongo-data
mountPath: /data/db


@@ -1,4 +1,4 @@
package b_middle
package c_middle
const CmiiMySQLTemplate = `
apiVersion: v1
@@ -11,7 +11,7 @@ metadata:
octopus.control: mysql-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
annotations:
annotations: {}
secrets:
- name: helm-mysql
---
@@ -27,7 +27,7 @@ metadata:
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-root-password: "{{ .MySQLRootPasswordBase64 }}"
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
@@ -167,7 +167,7 @@ data:
grant select on *.* to zyly@'%';
create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all on *.* to zyly_qc@'%';
create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
create user k8s_admin@'%' identified by '{{ .MySQLK8sAdminPassword }}';
grant all on *.* to k8s_admin@'%';
create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all on *.* to audit_dba@'%';
@@ -217,7 +217,7 @@ metadata:
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations:
annotations: {}
spec:
type: ClusterIP
clusterIP: None
@@ -246,7 +246,7 @@ metadata:
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations:
annotations: {}
spec:
type: NodePort
ports:
@@ -254,7 +254,7 @@ spec:
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
nodePort: {{ .MySQLNodePort }}
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: {{ .Namespace }}
@@ -300,6 +300,8 @@ spec:
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: {}
nodeSelector:
@@ -308,7 +310,11 @@ spec:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/bitnami-shell:11-debian-11-r136
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/bitnami-shell:11-debian-11-r136
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}bitnami-shell:11-debian-11-r136
{{- end }}
imagePullPolicy: "Always"
command:
- /bin/bash
@@ -322,7 +328,11 @@ spec:
mountPath: /bitnami/mysql
containers:
- name: mysql
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/mysql:8.1.0-debian-11-r42
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/mysql:8.1.0-debian-11-r42
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}mysql:8.1.0-debian-11-r42
{{- end }}
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
@@ -407,5 +417,5 @@ spec:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv
path: /var/lib/docker/mysql-pv/{{ .Namespace }}/
`


@@ -1,4 +1,4 @@
package b_middle
package c_middle
const CmiiNacosTemplate = `
apiVersion: v1
@@ -17,7 +17,7 @@ data:
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
mysql.password: "{{ .MySQLK8sAdminPassword }}"
---
apiVersion: v1
kind: Service
@@ -39,15 +39,13 @@ spec:
- port: 8848
name: server
targetPort: 8848
nodePort: 38989
nodePort: {{ .NacosNodePort }}
- port: 9848
name: server12
targetPort: 9848
nodePort: 38912
- port: 9849
name: server23
targetPort: 9849
nodePort: 38923
---
apiVersion: apps/v1
kind: StatefulSet
@@ -78,13 +76,23 @@ spec:
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: nacos-server
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/nacos-server:v2.1.2
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/nacos-server:v2.1.2
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}nacos-server:v2.1.2
{{- end }}
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"


@@ -1,4 +1,4 @@
package b_middle
package c_middle
const CmiiRabbitMQTemplate = `
apiVersion: v1
@@ -27,7 +27,7 @@ metadata:
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-password: "{{ .RabbitPasswordBase64 }}"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
@@ -45,7 +45,7 @@ data:
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
default_pass = {{ .RabbitPassword }}
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
@@ -143,11 +143,11 @@ spec:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
nodePort: {{ .RabbitNodePort }}
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 35675
nodePort: {{ .RabbitDashboardNodePort }}
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: {{ .Namespace }}
@@ -183,7 +183,8 @@ spec:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: {}
securityContext:
@@ -192,7 +193,11 @@ spec:
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/bitnami-shell:10-debian-10-r140
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/bitnami-shell:11-debian-11-r136
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}bitnami-shell:11-debian-11-r136
{{- end }}
imagePullPolicy: "Always"
command:
- /bin/bash
@@ -211,7 +216,11 @@ spec:
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/rabbitmq:3.9.12-debian-10-r3
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/rabbitmq:3.9.12-debian-10-r3
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}rabbitmq:3.9.12-debian-10-r3
{{- end }}
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG


@@ -1,4 +1,4 @@
package b_middle
package c_middle
const CmiiRedisTemplate = `
apiVersion: v1
@@ -349,11 +349,16 @@ spec:
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/redis:6.2.6-debian-10-r0
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/redis:6.2.6-debian-10-r0
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}redis:6.2.6-debian-10-r0
{{- end }}
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
@@ -481,13 +486,19 @@ spec:
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/redis:6.2.6-debian-10-r0
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/redis:6.2.6-debian-10-r0
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}redis:6.2.6-debian-10-r0
{{- end }}
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001


@@ -0,0 +1,214 @@
package c_middle
import (
"encoding/base64"
"github.com/go-playground/validator/v10"
"wdd.io/agent-common/logger"
"wdd.io/agent-common/utils"
"wdd.io/agent-deploy/z_dep"
)
var (
log = logger.Log
DefaultMysqlConfig = &MySQlConfig{
MySQLNodePort: "33306",
MySQLRootPasswordBase64: base64.StdEncoding.EncodeToString([]byte("QzfXQhd3bQ")),
MySQLRootPassword: "QzfXQhd3bQ",
MySQLK8sAdminPassword: "fP#UaH6qQ3)8",
}
DefaultRabbitConfig = &RabbitMQConfig{
RabbitNodePort: "35672",
RabbitDashboardNodePort: "35675",
RabbitPassword: "nYcRN91r._hj",
RabbitPasswordBase64: "blljUk45MXIuX2hq",
}
DefaultEmqxConfig = &EmqxConfig{
EmqxNodePort: "31883",
EmqxDashboardNodePort: "38085",
EmqxWebSocketNodePort: "38083",
EmqxPassword: "odD8#Ve7.B",
}
DefaultMongoConfig = &MongoConfig{
MongoPassword: "REdPza8#oVlt",
}
)
type MySQlConfig struct {
z_dep.CommonEnvironmentConfig
MySQLNodePort string
MySQLRootPassword string `validate:"required" comment:"string"`
MySQLRootPasswordBase64 string `validate:"required" comment:"base64"`
MySQLK8sAdminPassword string `validate:"required" comment:"string"`
}
type RedisConfig struct {
z_dep.CommonEnvironmentConfig
}
type RabbitMQConfig struct {
z_dep.CommonEnvironmentConfig
RabbitNodePort string
RabbitDashboardNodePort string
RabbitPassword string
RabbitPasswordBase64 string
}
type NacosConfig struct {
z_dep.CommonEnvironmentConfig
MySQLK8sAdminPassword string
NacosNodePort string
}
type MongoConfig struct {
z_dep.CommonEnvironmentConfig
MongoPassword string
MongoNodePort string
}
type EmqxConfig struct {
z_dep.CommonEnvironmentConfig
EmqxNodePort string
EmqxDashboardNodePort string
EmqxWebSocketNodePort string
EmqxPassword string
}
func (emqx *EmqxConfig) MidEmqxDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
// copy
utils.CopySameFields(commonEnv, emqx)
validate := validator.New()
err := validate.Struct(emqx)
if err != nil {
log.ErrorF("backend config validate error: %v\n", err)
return false
}
if !z_dep.ParseEnvToApplyFile(emqx, CmiiEmqxTemplate, z_dep.EmqxApplyFilePath) {
return false
}
return true
}
func MidEmqxDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
return commonEnv.ParseCommonEnvToApplyFile(CmiiEmqxTemplate, z_dep.EmqxApplyFilePath)
}
func (mongo *MongoConfig) MidMongoDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
// copy
utils.CopySameFields(commonEnv, mongo)
validate := validator.New()
err := validate.Struct(mongo)
if err != nil {
log.ErrorF("backend config validate error: %v\n", err)
return false
}
if !z_dep.ParseEnvToApplyFile(mongo, CmiiMongoTemplate, z_dep.MongoApplyFilePath) {
return false
}
return true
}
func MidMongoDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
return commonEnv.ParseCommonEnvToApplyFile(CmiiMongoTemplate, z_dep.MongoApplyFilePath)
}
func (rabbit *RabbitMQConfig) MidRabbitMQDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
// copy
utils.CopySameFields(commonEnv, rabbit)
validate := validator.New()
err := validate.Struct(rabbit)
if err != nil {
log.ErrorF("backend config validate error: %v\n", err)
return false
}
if !z_dep.ParseEnvToApplyFile(rabbit, CmiiRabbitMQTemplate, z_dep.RabbitMQApplyFilePath) {
return false
}
return true
}
func MidRabbitMQDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
return commonEnv.ParseCommonEnvToApplyFile(CmiiRabbitMQTemplate, z_dep.RabbitMQApplyFilePath)
}
func (redis *RedisConfig) MidRedisDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
// copy
utils.CopySameFields(commonEnv, redis)
validate := validator.New()
err := validate.Struct(redis)
if err != nil {
log.ErrorF("backend config validate error: %v\n", err)
return false
}
if !z_dep.ParseEnvToApplyFile(redis, CmiiRedisTemplate, z_dep.RedisApplyFilePath) {
return false
}
return true
}
func MidRedisDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
return commonEnv.ParseCommonEnvToApplyFile(CmiiRedisTemplate, z_dep.RedisApplyFilePath)
}
func (mysql *MySQlConfig) MidMySQlDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
// copy
utils.CopySameFields(commonEnv, mysql)
validate := validator.New()
err := validate.Struct(mysql)
if err != nil {
log.ErrorF("backend config validate error: %v\n", err)
return false
}
if !z_dep.ParseEnvToApplyFile(mysql, CmiiMySQLTemplate, z_dep.MySQLApplyFilePath) {
return false
}
return true
}
func MidMySQlDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
return commonEnv.ParseCommonEnvToApplyFile(CmiiMySQLTemplate, z_dep.MySQLApplyFilePath)
}
func (nacos *NacosConfig) MidNacosDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
// copy
utils.CopySameFields(commonEnv, nacos)
validate := validator.New()
err := validate.Struct(nacos)
if err != nil {
log.ErrorF("backend config validate error: %v\n", err)
return false
}
if !z_dep.ParseEnvToApplyFile(nacos, CmiiNacosTemplate, z_dep.NacosApplyFilePath) {
return false
}
return true
}
func MidNacosDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
return commonEnv.ParseCommonEnvToApplyFile(CmiiNacosTemplate, z_dep.NacosApplyFilePath)
}
func PVCDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
return commonEnv.ParseCommonEnvToApplyFile(CmiiPVCTemplate, z_dep.PVCApplyFilePath)
}
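
Every Mid*Deploy method above follows the same shape: copy the shared environment onto the typed config, validate it, then render the template into the apply file. A condensed, self-contained sketch of that pattern (struct names and the template are simplified stand-ins, not the commit's code):

package main

import (
	"fmt"
	"os"
	"text/template"

	"github.com/go-playground/validator/v10"
)

type commonEnv struct {
	Namespace  string `validate:"required"`
	TagVersion string `validate:"required"`
}

type mysqlConfig struct {
	commonEnv
	MySQLNodePort         string
	MySQLRootPassword     string `validate:"required"`
	MySQLK8sAdminPassword string `validate:"required"`
}

const svcTemplate = `apiVersion: v1
kind: Service
metadata:
  name: helm-mysql
  namespace: {{ .Namespace }}
spec:
  type: NodePort
  ports:
    - nodePort: {{ .MySQLNodePort }}
`

// deploy validates the config and appends the rendered template to the apply file
func deploy(cfg *mysqlConfig, tmpl, outPath string) bool {
	if err := validator.New().Struct(cfg); err != nil {
		fmt.Printf("config validate error: %v\n", err)
		return false
	}
	f, err := os.OpenFile(outPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
	if err != nil {
		return false
	}
	defer f.Close()
	return template.Must(template.New("t").Parse(tmpl)).Execute(f, cfg) == nil
}

func main() {
	cfg := &mysqlConfig{
		commonEnv:             commonEnv{Namespace: "bjtg", TagVersion: "5.5.0"},
		MySQLNodePort:         "33306",
		MySQLRootPassword:     "QzfXQhd3bQ",
		MySQLK8sAdminPassword: "fP#UaH6qQ3)8",
	}
	fmt.Println(deploy(cfg, svcTemplate, "k8s-mysql.yaml")) // true
}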


@@ -1,4 +1,4 @@
package b_middle
package c_middle
const CmiiPVCTemplate = `
apiVersion: v1


@@ -1,4 +1,102 @@
package image
package d_app
var CmiiBackendAppMap = map[string]string{
"cmii-admin-data": "5.2.0",
"cmii-admin-gateway": "5.2.0",
"cmii-admin-user": "5.2.0",
"cmii-app-release": "4.2.0-validation",
"cmii-open-gateway": "5.2.0",
"cmii-suav-supervision": "5.2.0",
"cmii-uav-airspace": "5.2.0",
"cmii-uav-alarm": "5.2.0",
"cmii-uav-autowaypoint": "4.1.6-cm-0828",
"cmii-uav-brain": "5.2.0",
"cmii-uav-cloud-live": "5.2.0",
"cmii-uav-clusters": "5.2.0",
"cmii-uav-cms": "5.2.0",
"cmii-uav-data-post-process": "5.2.0",
"cmii-uav-depotautoreturn": "4.2.0",
"cmii-uav-developer": "5.2.0-25858",
"cmii-uav-device": "5.2.0",
"cmii-uav-emergency": "5.2.0",
"cmii-uav-gateway": "5.2.0",
"cmii-uav-industrial-portfolio": "5.2.0-25268-10",
"cmii-uav-integration": "5.2.0-25447",
"cmii-uav-kpi-monitor": "5.2.0",
"cmii-uav-logger": "5.2.0",
"cmii-uav-material-warehouse": "5.2.0",
"cmii-uav-mission": "5.2.0-25840",
"cmii-uav-mqtthandler": "5.2.0-25340",
"cmii-uav-notice": "5.2.0",
"cmii-uav-oauth": "5.2.0",
"cmii-uav-process": "5.2.0",
"cmii-uav-surveillance": "5.2.0-25854",
"cmii-uav-threedsimulation": "5.2.0",
"cmii-uav-tower": "5.2.0",
"cmii-uav-user": "5.2.0",
"cmii-uav-waypoint": "5.2.0",
"cmii-uav-multilink": "5.2.0",
"cmii-uav-bridge": "5.2.0",
"cmii-uas-lifecycle": "5.2.0",
"cmii-uas-gateway": "5.2.0",
"cmii-uav-gis-server": "5.4.0",
"cmii-uav-grid-datasource": "5.4.0",
"cmii-uav-grid-engine": "5.4.0",
"cmii-uav-grid-manage": "5.4.0",
"cmii-uav-sense-adapter": "5.4.0",
}
var CmiiFrontendAppMap = map[string]string{
"cmii-suav-platform-supervision": "5.2.0",
"cmii-suav-platform-supervisionh5": "5.2.0",
"cmii-uav-platform": "5.2.0-011004",
"cmii-uav-platform-ai-brain": "5.2.0",
"cmii-uav-platform-armypeople": "5.2.0-24538",
"cmii-uav-platform-base": "5.2.0",
"cmii-uav-platform-cms-portal": "5.2.0",
"cmii-uav-platform-detection": "5.2.0",
"cmii-uav-platform-emergency-rescue": "5.2.0",
"cmii-uav-platform-hljtt": "5.2.0",
"cmii-uav-platform-jiangsuwenlv": "4.1.3-jiangsu-0427",
"cmii-uav-platform-logistics": "5.2.0",
"cmii-uav-platform-media": "5.2.0",
"cmii-uav-platform-multiterminal": "5.2.0",
"cmii-uav-platform-mws": "5.2.0",
"cmii-uav-platform-oms": "5.2.0",
"cmii-uav-platform-open": "5.2.0",
"cmii-uav-platform-qingdao": "4.1.6-24238-qingdao",
"cmii-uav-platform-qinghaitourism": "4.1.0-21377-0508",
"cmii-uav-platform-security": "4.1.6",
"cmii-uav-platform-securityh5": "5.2.0",
"cmii-uav-platform-seniclive": "5.2.0",
"cmii-uav-platform-share": "5.2.0",
"cmii-uav-platform-splice": "5.2.0",
"cmii-uav-platform-threedsimulation": "5.2.0-21392",
"cmii-uav-platform-visualization": "5.2.0",
"cmii-uav-platform-uasms": "5.2.0",
"cmii-uav-platform-uas": "5.2.0",
}
var CmiiMiddlewareNameMap = map[string]string{
"helm-nacos": "single",
"helm-emqxs": "single",
"helm-mysql": "single",
"helm-redis": "replication",
"helm-rabbitmq": "single",
}
var CmiiSrsAppMap = map[string]string{
"helm-live-op-v2": "deployment",
"helm-live-rtsp-op": "4.1.6",
"helm-live-srs-rtc": "statefulset",
}
var CmiiGISAppMap = map[string]string{
"cmii-uav-gis-server": "5.4.0",
"cmii-uav-grid-datasource": "5.4.0",
"cmii-uav-grid-engine": "5.4.0",
"cmii-uav-grid-manage": "5.4.0",
}
var MiddlewareAmd64 = []string{
"bitnami/redis:6.2.6-debian-10-r0",
@@ -13,7 +111,7 @@ var MiddlewareAmd64 = []string{
"ossrs/srs:v4.0.136",
"ossrs/srs:v5.0.195",
"ossrs/srs:v4.0-r3",
"emqx/emqx:4.2.12",
"emqx/emqx:4.4.9",
"emqx/emqx:5.5.1",
"nacos/nacos-server:v2.1.2",
"nacos/nacos-server:v2.1.2-slim",
@@ -27,6 +125,7 @@ var MiddlewareAmd64 = []string{
"redis:6.0.20-alpine",
"dyrnq/nfs-subdir-external-provisioner:v4.0.2",
"jerrychina2020/rke-tools:v0.175-linux",
"jerrychina2020/rke-tools:v0.175",
"busybox:latest",
}


@@ -1,14 +1,23 @@
package c_app
package d_app
import (
"github.com/go-playground/validator/v10"
"os"
"wdd.io/agent-common/logger"
"wdd.io/agent-common/utils"
"wdd.io/agent-operator/deploy/z_dep"
"wdd.io/agent-deploy/z_dep"
)
var log = logger.Log
var (
DefaultCmiiBackendConfig = &CmiiBackendConfig{}
DefaultCmiiFrontendConfig = &CmiiFrontendConfig{}
DefaultIngressConfig = &IngressConfig{
FrontendShortNameMaps: FrontendShortNameMaps,
BackendImageVersionMap: CmiiBackendAppMap,
}
log = logger.Log
)
type CmiiBackendConfig struct {
z_dep.CommonEnvironmentConfig
@@ -29,28 +38,19 @@ type CmiiFrontendConfig struct {
ClientId string
}
var (
DefaultCmiiBackendConfig = &CmiiBackendConfig{}
DefaultCmiiFrontendConfig = &CmiiFrontendConfig{}
BackendApplyFilePath = ""
FrontendApplyFilePath = ""
SRSApplyFilePath = ""
IngresApplyFilePath = ""
ConfigMapApplyFilePath = ""
)
type CmiiSrsConfig struct {
z_dep.CommonEnvironmentConfig
RtmpPort string
WebRTCPort string
SrtPort string
WebApiPort string
MySQLK8sAdminPassword string
}
func init() {
BackendApplyFilePath = z_dep.ApplyFilePrefix + "k8s-backend.yaml"
FrontendApplyFilePath = z_dep.ApplyFilePrefix + "k8s-frontend.yaml"
SRSApplyFilePath = z_dep.ApplyFilePrefix + "k8s-srs.yaml"
IngresApplyFilePath = z_dep.ApplyFilePrefix + "k8s-ingress.yaml"
ConfigMapApplyFilePath = z_dep.ApplyFilePrefix + "k8s-configmap.yaml"
log.DebugF("backend apply file path: %s\n", BackendApplyFilePath)
log.DebugF("frontend apply file path: %s\n", FrontendApplyFilePath)
log.DebugF("srs apply file path: %s\n", SRSApplyFilePath)
log.DebugF("ingress apply file path: %s\n", IngresApplyFilePath)
log.DebugF("config map apply file path: %s\n", ConfigMapApplyFilePath)
type IngressConfig struct {
z_dep.CommonEnvironmentConfig
FrontendShortNameMaps map[string]string
BackendImageVersionMap map[string]string `json:"backend_image_version_map,omitempty" validate:"required"`
}
func (backend *CmiiBackendConfig) BackendDeploy(common *z_dep.CommonEnvironmentConfig) bool {
@@ -65,16 +65,16 @@ func (backend *CmiiBackendConfig) BackendDeploy(common *z_dep.CommonEnvironmentC
return false
}
if !z_dep.ParseEnvToApplyFile(backend, CmiiBackendDeploymentTemplate, BackendApplyFilePath) {
if !z_dep.ParseEnvToApplyFile(backend, CmiiBackendDeploymentTemplate, z_dep.BackendApplyFilePath) {
return false
}
if !z_dep.ParseEnvToApplyFile(backend, CmiiBackendServiceTemplate, BackendApplyFilePath) {
if !z_dep.ParseEnvToApplyFile(backend, CmiiBackendServiceTemplate, z_dep.BackendApplyFilePath) {
return false
}
// pvc
if backend.NeedPvcCache {
if !z_dep.ParseEnvToApplyFile(backend, CmiiBackendPVCTemplate, BackendApplyFilePath) {
if !z_dep.ParseEnvToApplyFile(backend, CmiiBackendPVCTemplate, z_dep.BackendApplyFilePath) {
return false
}
}
@@ -94,10 +94,10 @@ func (frontend *CmiiFrontendConfig) FrontendDeploy(common *z_dep.CommonEnvironme
return false
}
if !z_dep.ParseEnvToApplyFile(frontend, CmiiFrontendDeploymentTemplate, FrontendApplyFilePath) {
if !z_dep.ParseEnvToApplyFile(frontend, CmiiFrontendDeploymentTemplate, z_dep.FrontendApplyFilePath) {
return false
}
if !z_dep.ParseEnvToApplyFile(frontend, CmiiFrontendServiceTemplate, FrontendApplyFilePath) {
if !z_dep.ParseEnvToApplyFile(frontend, CmiiFrontendServiceTemplate, z_dep.FrontendApplyFilePath) {
return false
}
@@ -115,19 +115,59 @@ func (frontend *CmiiFrontendConfig) ConfigMapDeploy(commonEnv *z_dep.CommonEnvir
return false
}
return z_dep.ParseEnvToApplyFile(frontend, CmiiFrontendConfigMapTemplate, ConfigMapApplyFilePath)
return z_dep.ParseEnvToApplyFile(frontend, CmiiFrontendConfigMapTemplate, z_dep.ConfigMapApplyFilePath)
}
func (ingress *IngressConfig) IngressDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
// copy
utils.CopySameFields(commonEnv, ingress)
// manual validate
if ingress.FrontendShortNameMaps == nil || len(ingress.FrontendShortNameMaps) == 0 {
log.Error("frontend short name is empty !")
return false
}
if ingress.BackendImageVersionMap == nil || len(ingress.BackendImageVersionMap) == 0 {
log.Error("backend image version map is empty !")
return false
}
if !z_dep.ParseEnvToApplyFile(ingress, CmiiFrontendIngressTemplate, z_dep.IngresApplyFilePath) {
return false
}
if !z_dep.ParseEnvToApplyFile(ingress, CmiiBackendIngressTemplate, z_dep.IngresApplyFilePath) {
return false
}
if !z_dep.ParseEnvToApplyFile(ingress, CmiiGatewayIngressTemplate, z_dep.IngresApplyFilePath) {
return false
}
return true
}
func (srsConfig *CmiiSrsConfig) SRSDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
// copy
utils.CopySameFields(commonEnv, srsConfig)
if !z_dep.ParseEnvToApplyFile(srsConfig, CmiiSrsTemplate, z_dep.SRSApplyFilePath) {
return false
}
return true
}
func IngressDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
if !commonEnv.ParseCommonEnvToApplyFile(CmiiFrontendIngressTemplate, IngresApplyFilePath) {
if !commonEnv.ParseCommonEnvToApplyFile(CmiiFrontendIngressTemplate, z_dep.IngresApplyFilePath) {
return false
}
if !commonEnv.ParseCommonEnvToApplyFile(CmiiBackendIngressTemplate, IngresApplyFilePath) {
if !commonEnv.ParseCommonEnvToApplyFile(CmiiBackendIngressTemplate, z_dep.IngresApplyFilePath) {
return false
}
if !commonEnv.ParseCommonEnvToApplyFile(CmiiGatewayIngressTemplate, IngresApplyFilePath) {
if !commonEnv.ParseCommonEnvToApplyFile(CmiiGatewayIngressTemplate, z_dep.IngresApplyFilePath) {
return false
}
@@ -135,10 +175,10 @@ func IngressDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
}
func SRSDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
os.Remove(SRSApplyFilePath)
return commonEnv.ParseCommonEnvToApplyFile(CmiiSrsTemplate, SRSApplyFilePath)
os.Remove(z_dep.SRSApplyFilePath)
return commonEnv.ParseCommonEnvToApplyFile(CmiiSrsTemplate, z_dep.SRSApplyFilePath)
}
func FrontendDefaultNginxDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
return commonEnv.ParseCommonEnvToApplyFile(CmiiFrontendDefaultNginxConfTemplate, FrontendApplyFilePath)
return commonEnv.ParseCommonEnvToApplyFile(CmiiFrontendDefaultNginxConfTemplate, z_dep.FrontendApplyFilePath)
}


@@ -1,4 +1,4 @@
package c_app
package d_app
import (
"testing"


@@ -1,9 +1,9 @@
package c_app
package d_app
var FrontendShortNameMaps = map[string]string{
"cmii-suav-platform-supervision": "supervision",
"cmii-suav-platform-supervisionh5": "supervisionh5",
"cmii-uav-platform": "platform",
"cmii-uav-platform": "pangu",
"cmii-uav-platform-ai-brain": "ai-brain",
"cmii-uav-platform-armypeople": "armypeople",
"cmii-uav-platform-base": "base",
@@ -25,6 +25,11 @@ var FrontendShortNameMaps = map[string]string{
"cmii-uav-platform-threedsimulation": "threedsimulation",
"cmii-uav-platform-jiangsuwenlv": "jiangsuwenlv",
"cmii-uav-platform-qinghaitourism": "qinghaitourism",
"cmii-uav-platform-qingdao": "qingdao",
"cmii-uav-platform-hljtt": "hljtt",
"cmii-uav-platform-visualization": "visualization",
"cmii-uav-platform-uasms": "uasms",
"cmii-uav-platform-uas": "uas",
}
var FrontendClientIdMaps = map[string]string{
@@ -54,4 +59,7 @@ var FrontendClientIdMaps = map[string]string{
"cmii-uav-platform-visualization": "empty",
"cmii-uav-platform-traffic": "APP_Jc8i2wOQ1t73QEJS",
"cmii-uav-platform-jiangsuwenlv": "empty",
"cmii-uav-platform-hljtt": "empty",
"cmii-uav-platform-uasms": "empty",
"cmii-uav-platform-uas": "empty",
}


@@ -1,4 +1,4 @@
package c_app
package d_app
const CmiiBackendDeploymentTemplate = `
apiVersion: apps/v1
@@ -35,12 +35,16 @@ spec:
- key: uavcloud.env
operator: In
values:
- demo
- {{ .TenantEnv }}
imagePullSecrets:
- name: harborsecret
containers:
- name: {{ .AppName }}
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/{{ .AppName }}:{{ .ImageTag }}
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/{{ .AppName }}:{{ .ImageTag }}
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}{{ .AppName }}:{{ .ImageTag }}
{{- end }}
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
@@ -80,7 +84,7 @@ spec:
cpu: 200m
livenessProbe:
httpGet:
path: /cmii/ping
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
@@ -90,7 +94,7 @@ spec:
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/ping
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
@@ -100,7 +104,7 @@ spec:
failureThreshold: 3
startupProbe:
httpGet:
path: /cmii/ping
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60


@@ -1,4 +1,4 @@
package c_app
package d_app
const CmiiFrontendDeploymentTemplate = `
apiVersion: apps/v1
@@ -30,7 +30,11 @@ spec:
- name: harborsecret
containers:
- name: {{ .AppName }}
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/{{ .AppName }}:{{ .ImageTag }}
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/{{ .AppName }}:{{ .ImageTag }}
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}{{ .AppName }}:{{ .ImageTag }}
{{- end }}
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
@@ -46,11 +50,11 @@ spec:
cpu: "1"
memory: 1Gi
requests:
cpu: 500m
memory: 500Mi
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /usr/local/nginx/conf/nginx.conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js


@@ -1,4 +1,4 @@
package c_app
package d_app
const CmiiSrsTemplate = `
kind: ConfigMap
@@ -14,7 +14,7 @@ metadata:
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 30935;
listen {{ .RtmpPort }};
max_connections 4096;
srs_log_tank console;
srs_log_level info;
@@ -43,7 +43,7 @@ data:
}
rtc_server {
enabled on;
listen 30090;
listen {{ .WebRTCPort }};
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
@@ -85,7 +85,11 @@ data:
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
{{- if .WebPort }}
hls_entry_prefix http://{{ .WebIP }}:{{ .WebPort }};
{{- else }}
hls_entry_prefix http://{{ .WebIP }};
{{- end }}
}
}
---
@@ -103,27 +107,27 @@ spec:
protocol: TCP
port: 30935
targetPort: 30935
nodePort: 30935
nodePort: {{ .RtmpPort }}
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
nodePort: {{ .WebRTCPort }}
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
nodePort: {{ .WebRTCPort }}
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
nodePort: {{ .SrtPort }}
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30557
nodePort: {{ .WebApiPort }}
selector:
srs-role: rtc
type: NodePort
@@ -194,7 +198,6 @@ spec:
srs-role: rtc
template:
metadata:
creationTimestamp: null
labels:
srs-role: rtc
spec:
@@ -211,7 +214,11 @@ spec:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/srs:v5.0.195
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/srs:v5.0.195
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}srs:v5.0.195
{{- end }}
ports:
- name: srs-rtmp
containerPort: 30935
@@ -236,8 +243,8 @@ spec:
value: {{ .WebIP }}
resources:
limits:
cpu: 1200m
memory: 6Gi
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
@@ -255,7 +262,11 @@ spec:
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/cmii-srs-oss-adaptor:2023-SA
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/cmii-srs-oss-adaptor:2023-SA
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}cmii-srs-oss-adaptor:2023-SA
{{- end }}
env:
- name: OSS_ENDPOINT
value: 'http://{{ .MinioInnerIP }}:9000'
@@ -272,7 +283,7 @@ spec:
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
value: {{ .MySQLK8sAdminPassword }}
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
@@ -283,7 +294,7 @@ spec:
value: 'yes'
resources:
limits:
cpu: 1200m
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
@@ -332,7 +343,6 @@ spec:
live-role: op-v2
template:
metadata:
creationTimestamp: null
labels:
live-role: op-v2
spec:
@@ -346,7 +356,11 @@ spec:
defaultMode: 420
containers:
- name: helm-live-op-v2
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/cmii-live-operator:5.2.0
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/cmii-live-operator:5.2.0
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}cmii-live-operator:5.2.0
{{- end }}
ports:
- name: operator
containerPort: 8080
@@ -364,7 +378,7 @@ spec:
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/ping
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
@@ -374,7 +388,7 @@ spec:
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/ping
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
@@ -485,14 +499,13 @@ data:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://{{ .WebIP }}:30935'
rtmp: 'rtmp://{{ .WebIP }}:{{ .RtmpPort }}'
rtsp: 'rtsp://{{ .WebIP }}:30554'
srt: 'srt://{{ .WebIP }}:30556'
srt: 'srt://{{ .WebIP }}:{{ .SrtPort }}'
flv: 'http://{{ .WebIP }}:30500'
hls: 'http://{{ .WebIP }}:30500'
rtc: 'webrtc://{{ .WebIP }}:30557'
rtc: 'webrtc://{{ .WebIP }}:{{ .WebRTCPort }}'
replay: 'https://{{ .WebIP }}:30333'
minio:
endpoint: http://{{ .MinioInnerIP }}:9000


@@ -0,0 +1,207 @@
package d_app
const CmiiFrontendConfigMapTemplate = `
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-{{ .ShortName }}
namespace: {{ .Namespace }}
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "{{ .TenantEnv }}",
{{- if .WebPort }}
CloudHOST: "{{ .WebIP }}:{{ .WebPort }}",
{{- else }}
CloudHOST: "{{ .WebIP }}",
{{- end }}
{{- if eq .ShortName "pangu" }}
ApplicationShortName: "",
{{- else }}
ApplicationShortName: "{{ .ShortName }}",
{{- end }}
AppClientId: "{{ .ClientId }}"
}
`
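
For a sense of what this ConfigMap emits, the sketch below renders the ingress-config.js body with Go's text/template using invented values; it also shows the special case where the "pangu" short name (cmii-uav-platform, served at the web root) renders an empty ApplicationShortName:

package main

import (
	"os"
	"text/template"
)

// the ingress-config.js body from CmiiFrontendConfigMapTemplate above, trimmed to its data portion
const ingressConfigJS = `var __GlobalIngressConfig = {
  TenantEnvironment: "{{ .TenantEnv }}",
{{- if .WebPort }}
  CloudHOST: "{{ .WebIP }}:{{ .WebPort }}",
{{- else }}
  CloudHOST: "{{ .WebIP }}",
{{- end }}
{{- if eq .ShortName "pangu" }}
  ApplicationShortName: "",
{{- else }}
  ApplicationShortName: "{{ .ShortName }}",
{{- end }}
  AppClientId: "{{ .ClientId }}"
}
`

type frontendEnv struct{ TenantEnv, WebIP, WebPort, ShortName, ClientId string }

func main() {
	t := template.Must(template.New("cm").Parse(ingressConfigJS))
	// an ordinary app gets its short name as the route prefix
	_ = t.Execute(os.Stdout, frontendEnv{"", "10.250.0.110", "8888", "oms", "empty"})
	// "pangu" is served at the root, so its short name renders empty
	_ = t.Execute(os.Stdout, frontendEnv{"", "10.250.0.110", "8888", "pangu", "empty"})
}
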
const CmiiFrontendDefaultNginxConfTemplate = `
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-cm
namespace: {{ .Namespace }}
labels:
cmii.type: frontend
data:
nginx.conf: |
server {
listen 9528;
server_name localhost;
gzip on;
location / {
root /home/cmii-platform/dist;
index index.html index.htm;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
`
const CmiiFrontendIngressTemplate = `
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: {{ .Namespace }}
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
{{- range $key, $value := .FrontendShortNameMaps }}
rewrite ^(/{{ $value }})$ $1/ redirect;
{{- end }}
spec:
rules:
- host: fake-domain.{{ .Namespace }}.io
http:
paths:
{{- if .TenantEnv }}
{{- $tenantEnv := .TenantEnv }}
- path: /{{ $tenantEnv }}/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
{{- range $key, $value := .FrontendShortNameMaps }}
- path: /{{ $tenantEnv }}/{{ $value }}/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: {{ $key }}
servicePort: 9528
{{- end }}
{{- else }}
- path: /?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
{{- range $key, $value := .FrontendShortNameMaps }}
- path: /{{ $value }}/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: {{ $key }}
servicePort: 9528
{{- end }}
{{- end }}
`
const CmiiBackendIngressTemplate = `
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: {{ .Namespace }}
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
{{- if .TenantEnv }}
{{- $tenantEnv := .TenantEnv }}
{{- range $key, $value := .BackendImageVersionMap }}
- host: {{ $key }}.uavcloud-{{ $tenantEnv }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: {{ $key }}
servicePort: 8080
{{- end }}
{{- else }}
{{- range $key, $value := .BackendImageVersionMap }}
- host: {{ $key }}.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: {{ $key }}
servicePort: 8080
{{- end }}
{{- end }}
`
const CmiiGatewayIngressTemplate = `
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: {{ .Namespace }}
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
spec:
rules:
- host: fake-domain.{{ .Namespace }}.io
http:
paths:
{{- if .TenantEnv }}
{{- $tenantEnv := .TenantEnv }}
- path: /{{ $tenantEnv }}/oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /{{ $tenantEnv }}/open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /{{ $tenantEnv }}/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
{{- else }}
- path: /oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
{{- end }}
`
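
These constants are plain Go text/template strings. As a minimal rendering sketch — the ingressParams struct and the trimmed-down demo template below are illustrative stand-ins, not the real call site, which this commit does not show:

package main

import (
	"os"
	"text/template"
)

// ingressParams is a hypothetical parameter struct; its fields mirror the
// placeholders referenced by the templates above.
type ingressParams struct {
	Namespace  string
	TagVersion string
	TenantEnv  string
}

// demo reproduces the {{- if .TenantEnv }} branching used by
// CmiiGatewayIngressTemplate, trimmed to a single path rule.
const demo = `namespace: {{ .Namespace }}
{{- if .TenantEnv }}
- path: /{{ .TenantEnv }}/api/?(.*)
{{- else }}
- path: /api/?(.*)
{{- end }}
`

func main() {
	tmpl := template.Must(template.New("ingress").Parse(demo))
	params := ingressParams{Namespace: "uavcloud-dev", TagVersion: "5.6.0", TenantEnv: "dev"}
	if err := tmpl.Execute(os.Stdout, params); err != nil {
		panic(err)
	}
}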

View File

@@ -0,0 +1,184 @@
package e_cmii
import (
"encoding/base64"
"wdd.io/agent-deploy/c_middle"
"wdd.io/agent-deploy/d_app"
"wdd.io/agent-deploy/z_dep"
)
type CmiiEnvConfig struct {
c_middle.MySQlConfig
c_middle.RedisConfig
c_middle.MongoConfig
c_middle.RabbitMQConfig
c_middle.NacosConfig
c_middle.EmqxConfig
d_app.CmiiFrontendConfig
d_app.CmiiBackendConfig
d_app.CmiiSrsConfig
}
var CmiiOutSideConfig = &CmiiEnvConfig{
MySQlConfig: c_middle.MySQlConfig{
MySQLNodePort: "33306",
MySQLRootPassword: "QzfXQhd3bQ",
MySQLRootPasswordBase64: base64.StdEncoding.EncodeToString([]byte("QzfXQhd3bQ")),
MySQLK8sAdminPassword: "fP#UaH6qQ3)8",
},
RedisConfig: c_middle.RedisConfig{},
MongoConfig: c_middle.MongoConfig{
MongoPassword: "REdPza8#oVlt",
MongoNodePort: "37017",
},
RabbitMQConfig: c_middle.RabbitMQConfig{
CommonEnvironmentConfig: z_dep.CommonEnvironmentConfig{},
RabbitNodePort: "35672",
RabbitDashboardNodePort: "36675",
RabbitPassword: "nYcRN91r._hj",
RabbitPasswordBase64: "blljUk45MXIuX2hq",
},
NacosConfig: c_middle.NacosConfig{
MySQLK8sAdminPassword: "fP#UaH6qQ3)8",
NacosNodePort: "38848",
},
EmqxConfig: c_middle.EmqxConfig{
EmqxNodePort: "31883",
EmqxDashboardNodePort: "38085",
EmqxWebSocketNodePort: "38083",
EmqxPassword: "odD8#Ve7.B",
},
CmiiFrontendConfig: d_app.CmiiFrontendConfig{},
CmiiBackendConfig: d_app.CmiiBackendConfig{},
CmiiSrsConfig: d_app.CmiiSrsConfig{
RtmpPort: "30935",
WebRTCPort: "30090",
SrtPort: "30556",
WebApiPort: "30557",
MySQLK8sAdminPassword: "fP#UaH6qQ3)8",
},
}
var CmiiDevConfig = &CmiiEnvConfig{
MySQlConfig: c_middle.MySQlConfig{
MySQLNodePort: "33306",
MySQLRootPassword: "Gwubc6CxRM",
MySQLRootPasswordBase64: "R3d1YmM2Q3hSTQ==",
MySQLK8sAdminPassword: "VFJncwy58^Zm",
},
RedisConfig: c_middle.RedisConfig{},
MongoConfig: c_middle.MongoConfig{
MongoPassword: "7(#dD3zcz8",
MongoNodePort: "37017",
},
RabbitMQConfig: c_middle.RabbitMQConfig{
CommonEnvironmentConfig: z_dep.CommonEnvironmentConfig{},
RabbitNodePort: "35672",
RabbitDashboardNodePort: "36675",
RabbitPassword: "7v&7#w1ef)T-",
RabbitPasswordBase64: "N3YmNyN3MWVmKVQt",
},
NacosConfig: c_middle.NacosConfig{
MySQLK8sAdminPassword: "VFJncwy58^Zm",
NacosNodePort: "33848",
},
EmqxConfig: c_middle.EmqxConfig{
EmqxNodePort: "31883",
EmqxDashboardNodePort: "38085",
EmqxWebSocketNodePort: "38083",
EmqxPassword: "4YPk*DS%+5",
},
CmiiFrontendConfig: d_app.CmiiFrontendConfig{},
CmiiBackendConfig: d_app.CmiiBackendConfig{},
CmiiSrsConfig: d_app.CmiiSrsConfig{
RtmpPort: "30935",
WebRTCPort: "30090",
SrtPort: "30556",
WebApiPort: "30557",
MySQLK8sAdminPassword: "VFJncwy58^Zm",
},
}
var CmiiDevFlightConfig = &CmiiEnvConfig{
MySQlConfig: c_middle.MySQlConfig{
MySQLNodePort: "33307",
MySQLRootPassword: "Gwubc6CxRM",
MySQLRootPasswordBase64: "R3d1YmM2Q3hSTQ==",
MySQLK8sAdminPassword: "VFJncwy58^Zm",
},
RedisConfig: c_middle.RedisConfig{},
MongoConfig: c_middle.MongoConfig{
MongoPassword: "7(#dD3zcz8",
MongoNodePort: "37018",
},
RabbitMQConfig: c_middle.RabbitMQConfig{
CommonEnvironmentConfig: z_dep.CommonEnvironmentConfig{},
RabbitNodePort: "35673",
RabbitDashboardNodePort: "36676",
RabbitPassword: "7v&7#w1ef)T-",
RabbitPasswordBase64: "N3YmNyN3MWVmKVQt",
},
NacosConfig: c_middle.NacosConfig{
MySQLK8sAdminPassword: "VFJncwy58^Zm",
NacosNodePort: "33849",
},
EmqxConfig: c_middle.EmqxConfig{
EmqxNodePort: "31884",
EmqxDashboardNodePort: "38086",
EmqxWebSocketNodePort: "38084",
EmqxPassword: "4YPk*DS%+5",
},
CmiiFrontendConfig: d_app.CmiiFrontendConfig{},
CmiiBackendConfig: d_app.CmiiBackendConfig{},
CmiiSrsConfig: d_app.CmiiSrsConfig{
RtmpPort: "30936",
WebRTCPort: "30091",
SrtPort: "30558",
WebApiPort: "30559",
MySQLK8sAdminPassword: "VFJncwy58^Zm",
},
}
var CmiiDevOperationConfig = &CmiiEnvConfig{
MySQlConfig: c_middle.MySQlConfig{
MySQLNodePort: "33308",
MySQLRootPassword: "Gwubc6CxRM",
MySQLRootPasswordBase64: "R3d1YmM2Q3hSTQ==",
MySQLK8sAdminPassword: "VFJncwy58^Zm",
},
RedisConfig: c_middle.RedisConfig{},
MongoConfig: c_middle.MongoConfig{
MongoPassword: "7(#dD3zcz8",
MongoNodePort: "37019",
},
RabbitMQConfig: c_middle.RabbitMQConfig{
CommonEnvironmentConfig: z_dep.CommonEnvironmentConfig{},
RabbitNodePort: "35674",
RabbitDashboardNodePort: "36677",
RabbitPassword: "7v&7#w1ef)T-",
RabbitPasswordBase64: "N3YmNyN3MWVmKVQt",
},
NacosConfig: c_middle.NacosConfig{
MySQLK8sAdminPassword: "VFJncwy58^Zm",
NacosNodePort: "33850",
},
EmqxConfig: c_middle.EmqxConfig{
EmqxNodePort: "31885",
EmqxDashboardNodePort: "38087",
EmqxWebSocketNodePort: "38085",
EmqxPassword: "4YPk*DS%+5",
},
CmiiFrontendConfig: d_app.CmiiFrontendConfig{},
CmiiBackendConfig: d_app.CmiiBackendConfig{},
CmiiSrsConfig: d_app.CmiiSrsConfig{
RtmpPort: "30937",
WebRTCPort: "30092",
SrtPort: "30560",
WebApiPort: "30561",
MySQLK8sAdminPassword: "VFJncwy58^Zm",
},
}
var CmiiIntegrationConfig = &CmiiEnvConfig{}
var CmiiUatConfig = &CmiiEnvConfig{}
var CmiiValidationConfig = &CmiiEnvConfig{}
var CmiiDemoConfig = &CmiiEnvConfig{}
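
A hedged sketch of how these per-environment values might be selected at deploy time; EnvConfigByName is a hypothetical helper, not something this commit defines:

// EnvConfigByName maps an environment name to the matching *CmiiEnvConfig
// declared above. Environments whose configs are still empty placeholders
// (integration, uat, validation, demo) are deliberately left out.
func EnvConfigByName(name string) (*CmiiEnvConfig, bool) {
	configs := map[string]*CmiiEnvConfig{
		"outside":      CmiiOutSideConfig,
		"dev":          CmiiDevConfig,
		"devflight":    CmiiDevFlightConfig,
		"devoperation": CmiiDevOperationConfig,
	}
	cfg, ok := configs[name]
	return cfg, ok
}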

View File

@@ -0,0 +1,148 @@
#!/usr/bin/env bash
#### Modify the values below ####
#### Modify the values below ####
#### Modify the values below ####
cmlc_app_image_list="cmlc-app-images-4.1.6.txt" # update the version as needed
rancher_image_list="kubernetes-images-2.5.7-1.20.4.txt" # usually no change needed
middleware_image_list="middleware-images.txt" # usually no change needed
#DockerRegisterDomain="20.47.129.116:8033" # adjust to the actual registry
DockerRegisterDomain="harbor.cdcyy.com.cn" # adjust to the actual registry
HarborAdminPass=V2ryStr@ngPss # must match the password used in the first script
#### Modify the values above ####
#### Modify the values above ####
#### Modify the values above ####
downloadAllNeededImages() {
    while [[ $# -gt 0 ]]; do
        pulled=""
        while IFS= read -r i; do
            [ -z "${i}" ] && continue
            echo "Start downloading: ${i}"
            if docker pull "${i}" >/dev/null 2>&1; then
                echo "Image pull success: ${i}"
                pulled="${pulled} ${i}"
            else
                if docker inspect "${i}" >/dev/null 2>&1; then
                    pulled="${pulled} ${i}"
                else
                    echo "Image pull failed: ${i}"
                fi
            fi
            echo "-------------------------------------------------"
        done <"${1}"
        shift
    done
}
downloadAllNeededImagesAndCompress() {
    while [[ $# -gt 0 ]]; do
        pulled=""
        while IFS= read -r i; do
            [ -z "${i}" ] && continue
            echo "Start downloading: ${i}"
            if docker pull "${i}" >/dev/null 2>&1; then
                echo "Image pull success: ${i}"
                pulled="${pulled} ${i}"
            else
                if docker inspect "${i}" >/dev/null 2>&1; then
                    pulled="${pulled} ${i}"
                else
                    echo "Image pull failed: ${i}"
                fi
            fi
            echo "-------------------------------------------------"
        done <"${1}"
        # strip the extension (middleware-images.txt -> middleware-images.tar.gz);
        # cut -d"." -f1 would truncate list names that contain version dots
        compressPacName="${1%.*}.tar.gz"
        echo "Creating ${compressPacName} with $(echo ${pulled} | wc -w | tr -d '[:space:]') images"
        docker save ${pulled} | gzip --stdout >"${compressPacName}"
        shift
    done
    echo "Packaging finished!"
}
pushRKEImageToHarbor() {
linux_images=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
linux_images+=("${i}")
done <"${rancher_image_list}"
# docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
docker login -u rad02_drone -p Drone@1234 harbor.cdcyy.com.cn
for i in "${linux_images[@]}"; do
[ -z "${i}" ] && continue
case $i in
*/*)
image_name="${DockerRegisterDomain}/${i}"
;;
*)
image_name="${DockerRegisterDomain}/rancher/${i}"
;;
esac
echo "开始镜像至私有仓库推送:${image_name}"
docker tag "${i}" "${image_name}"
docker push "${image_name}"
echo "-------------------------------------------------"
done
}
pushCMLCAPPImageToHarbor() {
app_images=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
app_images+=("${i}")
done <"${cmlc_app_image_list}"
docker login -u rad02_drone -p Drone@1234 harbor.cdcyy.com.cn
# docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
for app in "${app_images[@]}"; do
[ -z "${app}" ] && continue
image_name="${DockerRegisterDomain}/$(echo ${app} | cut -d"/" -f2-8)"
echo "开始镜像至私有仓库推送:${image_name}"
docker tag "${app}" "${image_name}"
docker push "${image_name}"
echo "-------------------------------------------------"
done
}
pushMiddlewareImageToHarbor() {
middleware_image=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
middleware_image+=("${i}")
done <"${middleware_image_list}"
# docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
docker login -u rad02_drone -p Drone@1234 harbor.cdcyy.com.cn
for app in "${middleware_image[@]}"; do
[ -z "${app}" ] && continue
case ${app} in
*/*/*)
image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f3-8)"
;;
*/*)
image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f2-8)"
;;
esac
echo "开始镜像至私有仓库推送:${image_name}"
docker tag "${app}" "${image_name}"
docker push "${image_name}"
echo "-------------------------------------------------"
done
}
#downloadAllNeededImagesAndCompress "${middleware_image_list}"
downloadAllNeededImages "${middleware_image_list}"
#pushRKEImageToHarbor
#pushCMLCAPPImageToHarbor
pushMiddlewareImageToHarbor

View File

@@ -0,0 +1,164 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: cmlc-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cmlc-nfs-client-provisioner-runner
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- create
- delete
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- watch
- update
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- watch
- create
- update
- patch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- apiGroups:
- extensions
resourceNames:
- nfs-provisioner
resources:
- podsecuritypolicies
verbs:
- use
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- list
- watch
- create
- update
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cmlc-run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: cmlc-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
name: cmlc-nfs-client-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cmlc-leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cmlc-leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: cmlc-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: cmlc-leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-client-provisioner
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmlc-nfs-client-provisioner
labels:
app: cmlc-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app: cmlc-nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: cmlc-nfs-client-provisioner
spec:
serviceAccountName: cmlc-nfs-client-provisioner
imagePullSecrets:
- name: harborsecret
containers:
- name: cmlc-nfs-client-provisioner
image: harbor.cdcyy.com.cn/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-client-provisioner
- name: NFS_SERVER
value: yfcsnfs.com
- name: NFS_PATH
value: /drone/zyly
volumes:
- name: nfs-client-root
nfs:
server: yfcsnfs.com
path: /drone/zyly

View File

@@ -0,0 +1,9 @@
kind: Secret
apiVersion: v1
metadata:
name: harborsecret
namespace: uavcloud-devoperation
data:
.dockerconfigjson: >-
ewoJImF1dGhzIjogewoJCSJoYXJib3ItcWEuc3JlLmNkY3l5LmNuIjogewoJCQkiYXV0aCI6ICJjbUZrTURKZlpISnZibVU2UkhKdmJtVkFNVEl6TkE9PSIKCQl9LAogICAgICAgICAgICAgICAgImhhcmJvci5jZGN5eS5jb20uY24iOiB7CgkJCSJhdXRoIjogImNtRmtNREpmWkhKdmJtVTZSSEp2Ym1WQU1USXpOQT09IgoJCX0KCX0sCgkiSHR0cEhlYWRlcnMiOiB7CgkJIlVzZXItQWdlbnQiOiAiRG9ja2VyLUNsaWVudC8xOS4wMy45IChsaW51eCkiCgl9Cn0=
type: kubernetes.io/dockerconfigjson
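
The .dockerconfigjson value above is simply base64 over a small JSON document that maps registry hosts to base64("user:password") auth entries. A standalone sketch that assembles such a payload — the credentials are placeholders, and the registry host is taken from the manifests above:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

type authEntry struct {
	Auth string `json:"auth"`
}

// dockerConfig mirrors the kubernetes.io/dockerconfigjson layout.
type dockerConfig struct {
	Auths map[string]authEntry `json:"auths"`
}

func main() {
	auth := base64.StdEncoding.EncodeToString([]byte("user:password")) // placeholder credentials
	cfg := dockerConfig{Auths: map[string]authEntry{
		"harbor.cdcyy.com.cn": {Auth: auth},
	}}
	raw, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// This string is what goes into the .dockerconfigjson field above.
	fmt.Println(base64.StdEncoding.EncodeToString(raw))
}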

agent-deploy/go.mod Executable file
View File

@@ -0,0 +1,23 @@
module wdd.io/agent-deploy
go 1.22.1
require (
github.com/go-playground/validator/v10 v10.21.0
wdd.io/agent-common v0.0.0
)
require (
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
go.uber.org/multierr v1.10.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.22.0 // indirect
golang.org/x/net v0.24.0 // indirect
golang.org/x/sys v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
)
replace wdd.io/agent-common => ../agent-common

agent-deploy/go.sum Executable file
View File

@@ -0,0 +1,34 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.21.0 h1:4fZA11ovvtkdgaeev9RGWPgc1uj3H8W+rNYyH/ySBb0=
github.com/go-playground/validator/v10 v10.21.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

File diff suppressed because it is too large

View File

@@ -0,0 +1,378 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}

View File

@@ -0,0 +1,265 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: uavcloud-dev
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: uavcloud-dev
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
data:
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
  EMQX_CLUSTER__K8S__NAMESPACE: "uavcloud-dev"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: uavcloud-dev
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
data:
emqx_auth_username.conf: |-
auth.user.1.username = cmlc
auth.user.1.password = 4YPk*DS%+5
auth.user.password_hash = sha256
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
{emqx_auth_username,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: uavcloud-dev
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
affinity: { }
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: harbor.cdcyy.com.cn/cmii/emqx:5.5.1
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: { }
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf"
subPath: emqx_auth_username.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_username.conf
path: emqx_auth_username.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: uavcloud-dev
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: uavcloud-dev
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: uavcloud-dev
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: uavcloud-dev
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: uavcloud-dev
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
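
The NodePort Service above exposes MQTT on 31883, and helm-emqxs-cm defines the cmlc user. A minimal connectivity check using the Eclipse Paho Go client — the broker address is a placeholder for a real cluster node IP:

package main

import (
	"fmt"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://NODE-IP:31883"). // placeholder: any node fronting the NodePort
		SetUsername("cmlc").
		SetPassword("4YPk*DS%+5").
		SetConnectTimeout(5 * time.Second)
	client := mqtt.NewClient(opts)
	if token := client.Connect(); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}
	fmt.Println("connected to helm-emqxs")
	client.Disconnect(250)
}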

File diff suppressed because it is too large

View File

@@ -0,0 +1,578 @@
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: uavcloud-dev
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/supervisionh5)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/hljtt)$ $1/ redirect;
rewrite ^(/jiangsuwenlv)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/multiterminal)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/qingdao)$ $1/ redirect;
rewrite ^(/qinghaitourism)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/securityh5)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/threedsimulation)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
spec:
rules:
- host: fake-domain.uavcloud-dev.io
http:
paths:
- path: /dev/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /dev/supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /dev/supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /dev/pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /dev/ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /dev/armypeople/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /dev/base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /dev/cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /dev/detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /dev/emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /dev/hljtt/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hljtt
servicePort: 9528
- path: /dev/jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /dev/logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /dev/media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /dev/multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-multiterminal
servicePort: 9528
- path: /dev/mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /dev/oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /dev/open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /dev/qingdao/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qingdao
servicePort: 9528
- path: /dev/qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /dev/security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-security
servicePort: 9528
- path: /dev/securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /dev/seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /dev/share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /dev/splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /dev/threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-threedsimulation
servicePort: 9528
- path: /dev/traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-traffic
servicePort: 9528
- path: /dev/visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: uavcloud-dev
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: cmii-admin-data.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-data
servicePort: 8080
- host: cmii-admin-gateway.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- host: cmii-admin-user.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-user
servicePort: 8080
- host: cmii-app-release.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-app-release
servicePort: 8080
- host: cmii-open-gateway.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- host: cmii-suav-supervision.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-supervision
servicePort: 8080
- host: cmii-uas-gateway.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- host: cmii-uas-lifecycle.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-lifecycle
servicePort: 8080
- host: cmii-uav-airspace.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-airspace
servicePort: 8080
- host: cmii-uav-alarm.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-alarm
servicePort: 8080
- host: cmii-uav-autowaypoint.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-autowaypoint
servicePort: 8080
- host: cmii-uav-brain.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-brain
servicePort: 8080
- host: cmii-uav-bridge.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-bridge
servicePort: 8080
- host: cmii-uav-cloud-live.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cloud-live
servicePort: 8080
- host: cmii-uav-clusters.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-clusters
servicePort: 8080
- host: cmii-uav-cms.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cms
servicePort: 8080
- host: cmii-uav-data-post-process.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-data-post-process
servicePort: 8080
- host: cmii-uav-depotautoreturn.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-depotautoreturn
servicePort: 8080
- host: cmii-uav-developer.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-developer
servicePort: 8080
- host: cmii-uav-device.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-device
servicePort: 8080
- host: cmii-uav-emergency.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-emergency
servicePort: 8080
- host: cmii-uav-gateway.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- host: cmii-uav-gis-server.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gis-server
servicePort: 8080
- host: cmii-uav-grid-datasource.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-datasource
servicePort: 8080
- host: cmii-uav-grid-engine.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-engine
servicePort: 8080
- host: cmii-uav-grid-manage.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-manage
servicePort: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-industrial-portfolio
servicePort: 8080
- host: cmii-uav-integration.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-integration
servicePort: 8080
- host: cmii-uav-kpi-monitor.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-kpi-monitor
servicePort: 8080
- host: cmii-uav-logger.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-logger
servicePort: 8080
- host: cmii-uav-material-warehouse.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-material-warehouse
servicePort: 8080
- host: cmii-uav-mission.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mission
servicePort: 8080
- host: cmii-uav-mqtthandler.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mqtthandler
servicePort: 8080
- host: cmii-uav-multilink.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-multilink
servicePort: 8080
- host: cmii-uav-notice.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-notice
servicePort: 8080
- host: cmii-uav-oauth.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-oauth
servicePort: 8080
- host: cmii-uav-process.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-surveillance.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-surveillance
servicePort: 8080
- host: cmii-uav-threedsimulation.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-threedsimulation
servicePort: 8080
- host: cmii-uav-tower.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tower
servicePort: 8080
- host: cmii-uav-user.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-user
servicePort: 8080
- host: cmii-uav-waypoint.uavcloud-dev.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-waypoint
servicePort: 8080
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: uavcloud-dev
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
spec:
rules:
- host: fake-domain.uavcloud-dev.io
http:
paths:
- path: /dev/oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /dev/open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /dev/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080

View File

@@ -0,0 +1,77 @@
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: uavcloud-dev
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
type: ClusterIP
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: uavcloud-dev
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: { }
containers:
- name: helm-mongo
image: harbor.cdcyy.com.cn/cmii/mongo:5.0
resources: { }
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: 7(#dD3zcz8
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
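
In-cluster clients reach this instance at helm-mongo:27017 with the root credentials set above. A minimal sketch using the official mongo-driver; the password must be URL-escaped because it contains '(' and '#':

package main

import (
	"context"
	"fmt"
	"net/url"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	uri := fmt.Sprintf("mongodb://cmlc:%s@helm-mongo:27017", url.QueryEscape("7(#dD3zcz8"))
	client, err := mongo.Connect(ctx, options.Client().ApplyURI(uri))
	if err != nil {
		panic(err)
	}
	defer client.Disconnect(ctx)
	if err := client.Ping(ctx, nil); err != nil {
		panic(err)
	}
	fmt.Println("helm-mongo reachable")
}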

View File

@@ -0,0 +1,423 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
annotations: { }
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "R3d1YmM2Q3hSTQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
    log_queries_not_using_indexes = 1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create
user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create
user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all
on *.* to zyly_qc@'%';
create
user k8s_admin@'%' identified by 'VFJncwy58^Zm';
grant all
on *.* to k8s_admin@'%';
create
user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all
on *.* to audit_dba@'%';
create
user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT
SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT
on *.* to db_backup@'%';
create
user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION
CLIENT on *.* to monitor@'%';
flush
privileges;
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: uavcloud-dev
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: uavcloud-dev
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: uavcloud-dev
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: { }
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: uavcloud-dev
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: { }
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: uavcloud-dev
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: uavcloud-dev
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: { }
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: harbor.cdcyy.com.cn/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: harbor.cdcyy.com.cn/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: { }
requests: { }
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/uavcloud-dev/
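
The Services above expose this instance as helm-mysql:3306 in-cluster (and as NodePort 33306 externally). A minimal connectivity sketch with go-sql-driver/mysql, using the k8s_admin account created by helm-mysql-init-scripts — the in-cluster DSN host is an assumption about where this runs:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// k8s_admin is created by the init ConfigMap above.
	dsn := "k8s_admin:VFJncwy58^Zm@tcp(helm-mysql:3306)/cmii?charset=utf8mb4"
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		panic(err)
	}
	defer db.Close()
	if err := db.Ping(); err != nil {
		panic(err)
	}
	fmt.Println("helm-mysql reachable")
}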

View File

@@ -0,0 +1,130 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: uavcloud-dev
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.6.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "VFJncwy58^Zm"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: uavcloud-dev
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.6.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 33848
- port: 9848
name: server12
targetPort: 9848
- port: 9849
name: server23
targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: uavcloud-dev
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.6.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 5.6.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: { }
containers:
- name: nacos-server
image: harbor.cdcyy.com.cn/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---

View File

@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: uavcloud-dev
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: uavcloud-dev
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: uavcloud-dev
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: uavcloud-dev
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
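All four claims depend on the nfs-prod-distribute StorageClass; if its provisioner is absent they stay Pending and the StatefulSets that mount them never start. A quick binding check:

# Confirm the claims are Bound and inspect the backing StorageClass
kubectl get pvc -n uavcloud-dev nfs-backend-log-pvc helm-emqxs helm-mongo helm-rabbitmq
kubectl get storageclass nfs-prod-distribute -o wide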


@@ -0,0 +1,328 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "N3YmNyN3MWVmKVQt"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = 7v&7#w1ef)T-
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
targetPort: dashboard
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: uavcloud-dev
publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 36675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: uavcloud-dev
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: uavcloud-dev
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: { }
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: harbor.cdcyy.com.cn/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: { }
requests: { }
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: harbor.cdcyy.com.cn/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: { }
requests: { }
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq
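The liveness and readiness probes already exercise rabbitmq-diagnostics; from outside the cluster the broker is reachable on the NodePorts declared above (35672 for AMQP, 36675 for the management UI). A connectivity sketch, assuming node IP 192.168.35.178 and the admin credentials from rabbitmq.conf:

# Management API overview with the configured admin account
curl -u 'admin:7v&7#w1ef)T-' http://192.168.35.178:36675/api/overview
# Re-run the readiness check by hand inside the pod
kubectl exec -n uavcloud-dev helm-rabbitmq-0 -- rabbitmq-diagnostics -q check_running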


@@ -0,0 +1,585 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
#!/bin/bash
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
#!/bin/bash
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-dev
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-dev
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-dev
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: { }
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: harbor.cdcyy.com.cn/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "100m"
memory: 1Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: tmp
emptyDir: { }
- name: redis-data
emptyDir: { }
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: harbor.cdcyy.com.cn/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.uavcloud-dev.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "100m"
memory: 1Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: redis-data
emptyDir: { }
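All of the health scripts reduce to an authenticated PING, so the same check works manually against the master service. A sketch, using the base64-decoded redis-password from the Secret above:

# Authenticated PING against the master (expect PONG)
kubectl exec -n uavcloud-dev helm-redis-master-0 -- \
  redis-cli -h helm-redis-master -p 6379 -a 'Mcache@4522' ping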


@@ -0,0 +1,495 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: uavcloud-dev
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 30935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://192.168.35.178;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: uavcloud-dev
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
nodePort: 30935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30557
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: uavcloud-dev
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: uavcloud-dev
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: uavcloud-dev
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: harbor.cdcyy.com.cn/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 30935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 192.168.35.178
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: uavcloud-dev/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: uavcloud-dev/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA
env:
- name: OSS_ENDPOINT
value: https://minio-ig-dev.uavcmlc.com
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-srs-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: VFJncwy58^Zm
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: uavcloud-dev/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: uavcloud-dev
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: uavcloud-dev
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op-v2
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: uavcloud-dev
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: uavcloud-dev
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 5.6.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: developer
password: N@cos14Good
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 5.6.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 5.6.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://192.168.35.178:30935'
rtsp: 'rtsp://192.168.35.178:30554'
srt: 'srt://192.168.35.178:30556'
flv: 'http://192.168.35.178:30500'
hls: 'http://192.168.35.178:30500'
rtc: 'webrtc://192.168.35.178:30090'
replay: 'https://192.168.35.178:30333'
minio:
endpoint: https://minio-ig-dev.uavcmlc.com
access-key: cmii
secret-key: B#923fC7mk
bucket: live-srs-hls
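Per srs.rtc.conf, RTMP ingest listens on 30935, SRT on 30556, WebRTC on 30090, and HLS is served by the http_server on 8080, with segments also shipped to MinIO by the oss-adaptor sidecar. A publish/playback smoke test might look like this; the input file, node IP, and the external HLS port 30500 (taken from the live-op proto config) are assumptions:

# Publish a test stream over RTMP to the SRS NodePort
ffmpeg -re -i sample.mp4 -c copy -f flv rtmp://192.168.35.178:30935/live/demo
# Play the HLS rendition written under /home/hls
ffplay http://192.168.35.178:30500/live/demo.m3u8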

File diff suppressed because it is too large


@@ -0,0 +1,378 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: uavcloud-dev
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "dev",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
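Every tenant ConfigMap above carries one ingress-config.js of the same shape; only ApplicationShortName and AppClientId vary, and the frontend pods mount and serve the file so the browser can discover its tenant environment at runtime. A sketch of fetching one rendered copy through the dev prefix (controller address is a placeholder; host and path follow the Ingress below):

# Fetch the per-tenant runtime config served by the security frontend
curl -H 'Host: fake-domain.uavcloud-dev.io' http://<ingress-node>/dev/security/ingress-config.js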


@@ -0,0 +1,265 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: uavcloud-dev
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: uavcloud-dev
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
data:
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
EMQX_CLUSTER__K8S__NAMESPACE: "uavcloud-dev"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: uavcloud-dev
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
data:
emqx_auth_username.conf: |-
auth.user.1.username = cmlc
auth.user.1.password = 4YPk*DS%+5
auth.user.password_hash = sha256
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
{emqx_auth_username,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: uavcloud-dev
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
affinity: { }
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: harbor.cdcyy.com.cn/cmii/emqx:5.5.1
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: { }
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf"
subPath: emqx_auth_username.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_username.conf
path: emqx_auth_username.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: uavcloud-dev
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: uavcloud-dev
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: uavcloud-dev
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: uavcloud-dev
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: uavcloud-dev
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
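With EMQX_ALLOW_ANONYMOUS set to false, clients must authenticate with the cmlc account from emqx_auth_username.conf; MQTT is exposed on NodePort 31883 and the dashboard on 38085. A minimal publish test, assuming node IP 192.168.35.178 and the mosquitto clients installed locally:

# Publish one message with the configured username/password
mosquitto_pub -h 192.168.35.178 -p 31883 -u cmlc -P '4YPk*DS%+5' -t demo/topic -m hello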

File diff suppressed because it is too large


@@ -0,0 +1,188 @@
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: uavcloud-dev
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/supervisionh5)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/hljtt)$ $1/ redirect;
rewrite ^(/jiangsuwenlv)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/multiterminal)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/qingdao)$ $1/ redirect;
rewrite ^(/qinghaitourism)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/securityh5)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/threedsimulation)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
spec:
rules:
- host: fake-domain.uavcloud-dev.io
http:
paths:
- path: /dev/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /dev/supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /dev/supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /dev/pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /dev/ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /dev/armypeople/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /dev/base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /dev/cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /dev/detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /dev/emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /dev/hljtt/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hljtt
servicePort: 9528
- path: /dev/jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /dev/logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /dev/media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /dev/multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-multiterminal
servicePort: 9528
- path: /dev/mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /dev/oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /dev/open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /dev/qingdao/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qingdao
servicePort: 9528
- path: /dev/qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /dev/security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-security
servicePort: 9528
- path: /dev/securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /dev/seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /dev/share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /dev/splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /dev/threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-threedsimulation
servicePort: 9528
- path: /dev/traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-traffic
servicePort: 9528
- path: /dev/visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
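Each path captures everything after the tenant prefix, and rewrite-target: /$1 strips the /dev segment before proxying, while the configuration-snippet redirects bare prefixes to their slash-terminated form. Routing can be spot-checked per tenant; the controller address is a placeholder:

# Inspect how the controller answers a tenant path (redirect or proxied app)
curl -sI -H 'Host: fake-domain.uavcloud-dev.io' http://<ingress-node>/dev/supervision/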


@@ -0,0 +1,77 @@
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: uavcloud-dev
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
type: ClusterIP
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: uavcloud-dev
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: { }
containers:
- name: helm-mongo
image: harbor.cdcyy.com.cn/cmii/mongo:5.0
resources: { }
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: 7(#dD3zcz8
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
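The root credentials are injected as plain environment variables, so an authenticated smoke test against the service is straightforward. A sketch using mongosh (bundled with the mongo:5.0 image):

# Authenticate as the root user and ping the server
kubectl exec -n uavcloud-dev helm-mongo-0 -- \
  mongosh --username cmlc --password '7(#dD3zcz8' --authenticationDatabase admin --eval 'db.runCommand({ ping: 1 })'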


@@ -0,0 +1,423 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
annotations: { }
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "R3d1YmM2Q3hSTQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
max_allowed_packet=32M
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all on *.* to zyly_qc@'%';
create user k8s_admin@'%' identified by 'VFJncwy58^Zm';
grant all on *.* to k8s_admin@'%';
create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all on *.* to audit_dba@'%';
create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
create user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION CLIENT on *.* to monitor@'%';
flush privileges;
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: uavcloud-dev
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: uavcloud-dev
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: uavcloud-dev
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: { }
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: uavcloud-dev
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: { }
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: uavcloud-dev
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: uavcloud-dev
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: { }
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: harbor.cdcyy.com.cn/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: harbor.cdcyy.com.cn/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: { }
requests: { }
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/uavcloud-dev/
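
A quick way to sanity-check this MySQL deployment is to connect through the NodePort service (33306) with one of the accounts created by the init script. A minimal sketch, assuming the mysql client is installed and NODE_IP (a placeholder) is the address of any cluster node:

  NODE_IP=10.0.0.1   # placeholder: any reachable node
  mysql -h "$NODE_IP" -P 33306 -u k8s_admin -p'VFJncwy58^Zm' -e 'SELECT VERSION();'

Note that the in-cluster cmii-mysql Service listens on 13306, while helm-mysql and the NodePort keep the standard 3306.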

View File

@@ -0,0 +1,130 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: uavcloud-dev
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.6.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "VFJncwy58^Zm"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: uavcloud-dev
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.6.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 33848
- port: 9848
name: server12
targetPort: 9848
- port: 9849
name: server23
targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: uavcloud-dev
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.6.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 5.6.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: { }
containers:
- name: nacos-server
image: harbor.cdcyy.com.cn/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
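
To verify the standalone Nacos instance started and can reach MySQL, probing the console over the NodePort (33848) is usually enough. A sketch, assuming curl and a reachable node address in NODE_IP (placeholder):

  curl -fsS "http://$NODE_IP:33848/nacos/v1/console/health/readiness"
  # the dashboard itself is served under http://$NODE_IP:33848/nacos/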

View File

@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: uavcloud-dev
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: uavcloud-dev
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: uavcloud-dev
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: uavcloud-dev
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
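
These claims only declare intent; they bind only if the nfs-prod-distribute StorageClass exists and its provisioner is healthy. A quick check after applying:

  kubectl get storageclass nfs-prod-distribute
  kubectl -n uavcloud-dev get pvc

All four claims should report STATUS Bound before the StatefulSets that mount them are rolled out.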

View File

@@ -0,0 +1,328 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "N3YmNyN3MWVmKVQt"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = 7v&7#w1ef)T-
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
      targetPort: dashboard
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: uavcloud-dev
publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 36675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: uavcloud-dev
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: uavcloud-dev
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: { }
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: harbor.cdcyy.com.cn/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: { }
requests: { }
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: harbor.cdcyy.com.cn/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: { }
requests: { }
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq
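
With the management plugin enabled above, the simplest external health check goes through the HTTP API on the dashboard NodePort (36675). A sketch, assuming curl and the admin credentials from the ConfigMap (NODE_IP is a placeholder):

  curl -fsS -u admin:'7v&7#w1ef)T-' "http://$NODE_IP:36675/api/overview" | head -c 200
  # in-pod equivalents, as used by the probes:
  #   rabbitmq-diagnostics -q ping
  #   rabbitmq-diagnostics -q check_running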

View File

@@ -0,0 +1,585 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-dev
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-dev
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-dev
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: { }
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: harbor.cdcyy.com.cn/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "100m"
memory: 1Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: tmp
emptyDir: { }
- name: redis-data
emptyDir: { }
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: uavcloud-dev
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-dev
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: harbor.cdcyy.com.cn/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.uavcloud-dev.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "100m"
memory: 1Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: redis-data
emptyDir: { }
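
The Secret's redis-password field is base64 for Mcache@4522, so the master can be smoke-tested through a port-forward. A sketch, assuming kubectl and redis-cli are available locally:

  kubectl -n uavcloud-dev port-forward svc/helm-redis-master 6379:6379 &
  redis-cli -h 127.0.0.1 -a 'Mcache@4522' ping               # expect PONG
  redis-cli -h 127.0.0.1 -a 'Mcache@4522' info replication   # one connected replica

Because FLUSHDB and FLUSHALL are renamed away in master.conf and replica.conf, destructive flushes fail by design.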

File diff suppressed because it is too large

View File

@@ -0,0 +1,406 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
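
Each ConfigMap carries a single ingress-config.js that the matching tenant front-end loads; only ApplicationShortName and AppClientId vary. To spot-check one rendered entry (the backslash escapes the dot in the key name):

  kubectl -n uavcloud-devflight get configmap tenant-prefix-base \
    -o jsonpath='{.data.ingress-config\.js}'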

View File

@@ -0,0 +1,265 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: uavcloud-devflight
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: uavcloud-devflight
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
data:
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
  EMQX_CLUSTER__K8S__NAMESPACE: "uavcloud-devflight"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: uavcloud-devflight
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
data:
emqx_auth_username.conf: |-
auth.user.1.username = cmlc
auth.user.1.password = 4YPk*DS%+5
auth.user.password_hash = sha256
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
{emqx_auth_username,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: uavcloud-devflight
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
affinity: { }
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: harbor.cdcyy.com.cn/cmii/emqx:5.5.1
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: { }
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf"
subPath: emqx_auth_username.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_username.conf
path: emqx_auth_username.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: uavcloud-devflight
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: uavcloud-devflight
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: uavcloud-devflight
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: uavcloud-devflight
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31884
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38086
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38084
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: uavcloud-devflight
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
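
A basic end-to-end check is to publish through the MQTT NodePort (31884) with the credentials from emqx_auth_username.conf. A sketch, assuming the mosquitto clients are installed (NODE_IP is a placeholder):

  mosquitto_pub -h "$NODE_IP" -p 31884 -u cmlc -P '4YPk*DS%+5' -t probe/hello -m ok
  # dashboard: http://$NODE_IP:38086, MQTT over WebSocket: port 38084

Note that the mounted plugin files (loaded_plugins, emqx_auth_username.conf) follow the EMQX 4.x layout while the image tag is 5.5.1; whether they take effect depends on the image actually shipping a 4.x-compatible configuration.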

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,77 @@
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: uavcloud-devflight
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
type: ClusterIP
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: uavcloud-devflight
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: { }
containers:
- name: helm-mongo
image: harbor.cdcyy.com.cn/cmii/mongo:5.0
resources: { }
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: 7(#dD3zcz8
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
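
For a quick auth check against the single-node Mongo, a port-forward plus mongosh works. A sketch, assuming both tools are available locally:

  kubectl -n uavcloud-devflight port-forward svc/helm-mongo 27017:27017 &
  mongosh --host 127.0.0.1 --port 27017 -u cmlc -p '7(#dD3zcz8' \
    --authenticationDatabase admin --eval 'db.runCommand({ ping: 1 })'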

Some files were not shown because too many files have changed in this diff