[Agent][Deploy] - add zjyd

zeaslity
2024-08-20 09:35:32 +08:00
parent 1828116d8a
commit 40b540f082
252 changed files with 21873 additions and 25482 deletions


@@ -5,15 +5,18 @@ import (
"os"
"path/filepath"
"strings"
image2 "wdd.io/agent-common/image"
"wdd.io/agent-common/utils"
agentdeploy "wdd.io/agent-deploy"
"wdd.io/agent-deploy/z_dep"
"wdd.io/agent-operator/image"
)
const DeployFilePrefix = "/home/wdd/IdeaProjects/ProjectOctopus/agent-common/real_project/"
func CmiiEnvDeploy(deployCommonEnv *z_dep.CommonEnvironmentConfig, shouldDoCompleteDeploy bool, backupFromEnv string) {
folderPrefix := "/home/wdd/IdeaProjects/ProjectOctopus/agent-deploy/" + deployCommonEnv.Namespace + "/"
folderPrefix := DeployFilePrefix + deployCommonEnv.Namespace + "/"
tenantEnv := deployCommonEnv.Namespace
// uavcloud-devflight ==> devflight
@@ -73,8 +76,60 @@ func CmiiEnvDeploy(deployCommonEnv *z_dep.CommonEnvironmentConfig, shouldDoCompl
}
func CmiiEnvDeployOffline(deployCommonEnv *z_dep.CommonEnvironmentConfig, shouldDoCompleteDeploy bool, allCmiiImageList []string) {
folderPrefix := DeployFilePrefix + deployCommonEnv.Namespace + "/"
tenantEnv := deployCommonEnv.Namespace
// uavcloud-devflight ==> devflight
// uavcloud-dev ==> dev
if strings.Contains(tenantEnv, "-") {
split := strings.Split(tenantEnv, "-")
tenantEnv = split[len(split)-1]
} else {
// demo ==> ""
// cqlyj ==> ""
tenantEnv = ""
}
// assign folder prefix
deployCommonEnv.ApplyFilePrefix = folderPrefix
deployCommonEnv.TenantEnv = tenantEnv
var backendMap map[string]string
var frontendMap map[string]string
var srsMap map[string]string
// output the tags for the specific version
cmiiImageVersionMap := image2.CmiiImageMapFromImageFullNameList(allCmiiImageList)
frontendMap, backendMap, srsMap = image2.FrontendBackendSrsImageMapFromCmiiImageMap(cmiiImageVersionMap)
utils.BeautifulPrintWithTitle(backendMap, "backendMap")
utils.BeautifulPrintWithTitle(frontendMap, "frontendMap")
utils.BeautifulPrintWithTitle(srsMap, "srsMap")
// get the apply file path
deployCommonEnv.GenerateApplyFilePath()
// do generate all application files
// generate and get all old stuff
agentdeploy.CmiiEnvironmentDeploy(shouldDoCompleteDeploy, deployCommonEnv, backendMap, frontendMap)
// test
//GetNodeWideByKubectl(deployNamespace)
// clear old apply file
//clearOldApplyStuff(common, shouldDoCompleteDeploy)
// apply new app
//applyNewAppStuff(common, shouldDoCompleteDeploy)
fmt.Println()
fmt.Println("-------------------- all done ---------------------")
fmt.Println()
}
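For readers skimming the hunk above: the tenant derivation keeps only the suffix after the last hyphen in the namespace and falls back to an empty string for single-word namespaces. A minimal standalone sketch of that mapping (the helper name tenantEnvFromNamespace is hypothetical and not part of this commit):

package main

import (
    "fmt"
    "strings"
)

// tenantEnvFromNamespace mirrors the logic above:
// "uavcloud-devflight" -> "devflight", "uavcloud-dev" -> "dev",
// single-word namespaces such as "demo" or "cqlyj" -> "".
func tenantEnvFromNamespace(namespace string) string {
    if strings.Contains(namespace, "-") {
        parts := strings.Split(namespace, "-")
        return parts[len(parts)-1]
    }
    return ""
}

func main() {
    for _, ns := range []string{"uavcloud-devflight", "uavcloud-dev", "demo", "zjyd"} {
        fmt.Printf("%s => %q\n", ns, tenantEnvFromNamespace(ns))
    }
}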
func CmiiNewAppDeploy(deployCommonEnv *z_dep.CommonEnvironmentConfig, newAppNamespace string) {
folderPrefix := "/home/wdd/IdeaProjects/ProjectOctopus/agent-deploy/" + deployCommonEnv.Namespace + "/"
folderPrefix := DeployFilePrefix + deployCommonEnv.Namespace + "/"
tenantEnv := deployCommonEnv.Namespace
// uavcloud-devflight ==> devflight


@@ -3,6 +3,7 @@ package main
import (
"testing"
image2 "wdd.io/agent-common/image"
"wdd.io/agent-common/real_project/zhejianyidong_erjipingtai"
"wdd.io/agent-deploy/z_dep"
)
@@ -61,6 +62,42 @@ func TestCmiiEnvDeploy_LiuXiTongGan(t *testing.T) {
}
func TestCmiiEnvDeploy_ZheJiangYiDongErJiPingTai(t *testing.T) {
// Zhejiang Mobile level-2 platform
commonEnv := &z_dep.CommonEnvironmentConfig{
WebIP: "111.2.224.59",
WebPort: "8088",
HarborIPOrCustomImagePrefix: "192.168.10.3",
HarborPort: "8033",
Namespace: "zjyd",
TagVersion: "5.7.0",
NFSServerIP: "192.168.10.3",
MinioInnerIP: "192.168.10.2",
}
CmiiEnvDeployOffline(commonEnv, true, zhejianyidong_erjipingtai.Cmii570ImageList)
}
func TestCmiiEnvDeploy_JiangSuNanTong(t *testing.T) {
// Jiangsu Nantong
commonEnv := &z_dep.CommonEnvironmentConfig{
WebIP: "111.2.224.59",
WebPort: "8088",
HarborIPOrCustomImagePrefix: "192.168.10.3",
HarborPort: "8033",
Namespace: "zjyd",
TagVersion: "5.7.0",
NFSServerIP: "192.168.10.3",
MinioInnerIP: "192.168.10.2",
}
CmiiEnvDeployOffline(commonEnv, true, zhejianyidong_erjipingtai.Cmii570ImageList)
}
func TestCmiiNewAppDeploy(t *testing.T) {
deployNamespace := devOperation


@@ -11,14 +11,14 @@ import (
)
var DefaultCmiiOperator = CmiiK8sOperator{}
var updateLogPath = "/home/wdd/IdeaProjects/ProjectOctopus/agent-operator/log/cmii-update-log.txt"
var UpdateLogPath = "/home/wdd/IdeaProjects/ProjectOctopus/agent-operator/log/cmii-update-log.txt"
func init() {
switch runtime.GOOS {
case "linux":
updateLogPath = "/home/wdd/IdeaProjects/ProjectOctopus/agent-operator/log/cmii-update-log.txt"
UpdateLogPath = "/home/wdd/IdeaProjects/ProjectOctopus/agent-operator/log/cmii-update-log.txt"
case "windows":
updateLogPath = "C:\\Users\\wddsh\\Documents\\IdeaProjects\\ProjectOctopus\\agent-operator\\log\\cmii-update-log.txt"
UpdateLogPath = "C:\\Users\\wddsh\\Documents\\IdeaProjects\\ProjectOctopus\\agent-operator\\log\\cmii-update-log.txt"
}
}
@@ -303,11 +303,12 @@ func UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag string) (updateOK boo
if cmiiDeploymentInterface == nil {
return updateOK, oldImageTag, newImageTag
}
// check whether an update is needed
oldImageTag = cmiiDeploymentInterface.ImageTag
if oldImageTag == newTag {
log.DebugF("[UpdateCmiiDeploymentImageTag] - [%s] [%s] image tag are the same ! no need to update !", cmiiEnv, appName)
// restart
// restart deployment
if DefaultCmiiOperator.DeploymentRestart(cmiiEnv, appName) {
return true, oldImageTag, oldImageTag
} else {
@@ -329,8 +330,8 @@ func UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag string) (updateOK boo
return false, oldImageTag, newImageTag
}
// append log
utils.AppendContentToFile(content, updateLogPath)
// append update log
utils.AppendContentToFile(content, UpdateLogPath)
// re-get from env
time.Sleep(time.Second)
@@ -340,6 +341,7 @@ func UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag string) (updateOK boo
return false, oldImageTag, newImageTag
}
// result
return true, oldImageTag, deploy.ImageTag
}
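Taken together, the hunks above give UpdateCmiiDeploymentImageTag roughly the following shape: if the deployment already runs the requested tag it is restarted instead of patched; otherwise the tag is patched and a line is appended to UpdateLogPath. The sketch below only illustrates that control flow; the stub functions (lookupImageTag, restartDeployment, patchImageTag, appendLog) are hypothetical stand-ins for the real operator calls, not the commit's code:

package main

import "fmt"

// Hypothetical stubs standing in for the real CmiiK8sOperator calls.
func lookupImageTag(env, app string) string   { return "5.6.0" }
func restartDeployment(env, app string) bool  { return true }
func patchImageTag(env, app, tag string) bool { return true }
func appendLog(line, path string)             { fmt.Println("update log:", line) }

// updateOrRestart mirrors the flow shown in the diff: restart when the tag is
// already current, otherwise patch the tag and append a line to the update log.
func updateOrRestart(env, app, newTag, logPath string) (bool, string, string) {
    oldTag := lookupImageTag(env, app)
    if oldTag == newTag {
        if restartDeployment(env, app) {
            return true, oldTag, oldTag
        }
        return false, oldTag, newTag
    }
    if !patchImageTag(env, app, newTag) {
        return false, oldTag, newTag
    }
    appendLog(fmt.Sprintf("%s %s %s -> %s", env, app, oldTag, newTag), logPath)
    return true, oldTag, newTag
}

func main() {
    fmt.Println(updateOrRestart("demo", "cmii-uav-gateway", "5.7.0", "/tmp/cmii-update-log.txt"))
}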
@@ -377,7 +379,7 @@ func UpdateCmiiImageTagFromNameTagList(cmiiEnv string, nameTagList []string) (re
func RollBackCmiiDeploymentFromUpdateLog(updateLog string) bool {
//if !executor.BasicFindContentInFile(updateLog, updateLogPath) {
//if !executor.BasicFindContentInFile(updateLog, UpdateLogPath) {
// log.ErrorF("[RollBackCmiiDeploymentFromUpdateLog] - [%s] no this update log ! use update instead ! => ", updateLog)
// return false
//}
@@ -734,6 +736,7 @@ func FilterAllCmiiNodeSoft(nodeList []CmiiNodeInterface) (result []CmiiNodeInter
return result
}
// AppNameBelongsToCmiiImage reports whether an appName belongs to CMII, based on CmiiBackendAppMap and CmiiFrontendAppMap
func AppNameBelongsToCmiiImage(appName string) bool {
_, ok := d_app.CmiiBackendAppMap[appName]
if !ok {


@@ -141,78 +141,78 @@ func TestBackupAllCmiiDeploymentToMap(t *testing.T) {
}
func TestBackupAllCmiiDeploymentToList(t *testing.T) {
allCmiiImageList := BackupAllCmiiDeploymentToList(demo, false)
allCmiiImageList := BackupAllCmiiDeploymentToList(demo, true)
utils.BeautifulPrint(allCmiiImageList)
}
// Update DEMO by Tag Update
func TestUpdateCmiiImageTagFromNameTagMap(t *testing.T) {
cmii530BackendMap := map[string]string{
"cmii-admin-data": "5.3.0",
"cmii-admin-gateway": "5.3.0",
"cmii-admin-user": "5.3.0",
"cmii-open-gateway": "5.3.0",
"cmii-suav-supervision": "5.3.0",
"cmii-uav-airspace": "5.3.0",
"cmii-uav-alarm": "5.3.0",
"cmii-uav-brain": "5.3.0",
"cmii-uav-cloud-live": "5.3.0",
"cmii-uav-cms": "5.3.0",
"cmii-uav-data-post-process": "5.3.0",
"cmii-uav-developer": "5.3.0",
"cmii-uav-device": "5.3.0",
"cmii-uav-emergency": "5.3.0",
"cmii-uav-gateway": "5.3.0",
"cmii-uav-gis-server": "5.3.0",
"cmii-uav-industrial-portfolio": "5.3.0",
"cmii-uav-integration": "5.3.0",
"cmii-uav-logger": "5.3.0",
"cmii-uav-material-warehouse": "5.3.0",
"cmii-uav-mission": "5.3.0",
"cmii-uav-mqtthandler": "5.3.0",
"cmii-uav-notice": "5.3.0",
"cmii-uav-oauth": "5.3.0",
"cmii-uav-process": "5.3.0",
"cmii-uav-surveillance": "5.3.0",
"cmii-uav-threedsimulation": "5.3.0",
"cmii-uav-tower": "5.3.0",
"cmii-uav-user": "5.3.0",
"cmii-uav-waypoint": "5.3.0",
//"cmii-uav-grid-datasource": "5.2.0-24810",
//"cmii-uav-grid-engine": "5.1.0",
//"cmii-uav-grid-manage": "5.1.0",
"cmii-admin-data": "5.7.0",
"cmii-admin-gateway": "5.7.0",
"cmii-admin-user": "5.7.0",
"cmii-open-gateway": "5.7.0",
"cmii-suav-supervision": "5.7.0",
"cmii-uav-airspace": "5.7.0",
"cmii-uav-alarm": "5.7.0",
"cmii-uav-brain": "5.7.0",
"cmii-uav-cloud-live": "5.7.0",
"cmii-uav-cms": "5.7.0",
"cmii-uav-data-post-process": "5.7.0",
"cmii-uav-developer": "5.7.0",
"cmii-uav-device": "5.7.0",
"cmii-uav-emergency": "5.7.0",
"cmii-uav-gateway": "5.7.0",
"cmii-uav-gis-server": "5.7.0",
"cmii-uav-industrial-portfolio": "5.7.0",
"cmii-uav-integration": "5.7.0",
"cmii-uav-logger": "5.7.0",
"cmii-uav-material-warehouse": "5.7.0",
"cmii-uav-mission": "5.7.0",
"cmii-uav-mqtthandler": "5.7.0",
"cmii-uav-notice": "5.7.0",
"cmii-uav-oauth": "5.7.0",
"cmii-uav-process": "5.7.0",
"cmii-uav-surveillance": "5.7.0",
"cmii-uav-threedsimulation": "5.7.0",
"cmii-uav-tower": "5.7.0",
"cmii-uav-user": "5.7.0",
"cmii-uav-waypoint": "5.7.0",
"cmii-uav-sense-adapter": "5.7.0",
"cmii-uav-multilink": "5.7.0",
}
cmii530FrontendMap := map[string]string{
"cmii-suav-platform-supervision": "5.3.0",
"cmii-suav-platform-supervisionh5": "5.3.0",
"cmii-uav-platform": "5.3.0",
"cmii-uav-platform-ai-brain": "5.3.0",
"cmii-uav-platform-armypeople": "5.3.0",
"cmii-uav-platform-base": "5.3.0",
"cmii-uav-platform-cms-portal": "5.3.0",
"cmii-uav-platform-detection": "5.3.0",
"cmii-uav-platform-emergency-rescue": "5.3.0",
"cmii-uav-platform-logistics": "5.3.0",
"cmii-uav-platform-media": "5.3.0",
"cmii-uav-platform-multiterminal": "5.3.0",
"cmii-uav-platform-mws": "5.3.0",
"cmii-uav-platform-oms": "5.3.0",
"cmii-uav-platform-open": "5.3.0",
"cmii-uav-platform-securityh5": "5.3.0",
"cmii-uav-platform-seniclive": "5.3.0",
"cmii-uav-platform-share": "5.3.0",
"cmii-uav-platform-splice": "5.3.0",
"cmii-uav-platform-threedsimulation": "5.3.0",
"cmii-uav-platform-visualization": "5.3.0",
"cmii-suav-platform-supervision": "5.7.0",
"cmii-suav-platform-supervisionh5": "5.7.0",
"cmii-uav-platform": "5.7.0",
"cmii-uav-platform-ai-brain": "5.7.0",
"cmii-uav-platform-armypeople": "5.7.0",
//"cmii-uav-platform-base": "5.7.0",
"cmii-uav-platform-cms-portal": "5.7.0",
//"cmii-uav-platform-detection": "5.7.0",
//"cmii-uav-platform-emergency-rescue": "5.7.0",
//"cmii-uav-platform-logistics": "5.7.0",
"cmii-uav-platform-media": "5.7.0",
//"cmii-uav-platform-multiterminal": "5.7.0",
"cmii-uav-platform-mws": "5.7.0",
"cmii-uav-platform-oms": "5.7.0",
"cmii-uav-platform-open": "5.7.0",
"cmii-uav-platform-securityh5": "5.7.0",
//"cmii-uav-platform-seniclive": "5.7.0",
"cmii-uav-platform-share": "5.7.0",
//"cmii-uav-platform-splice": "5.7.0",
//"cmii-uav-platform-threedsimulation": "5.7.0",
//"cmii-uav-platform-visualization": "5.7.0",
//"cmii-uav-platform-security": "4.1.6",
}
result := UpdateCmiiImageTagFromNameTagMap("demo", cmii530BackendMap)
result := UpdateCmiiImageTagFromNameTagMap(demo, cmii530BackendMap)
utils.BeautifulPrint(result)
result = UpdateCmiiImageTagFromNameTagMap("demo", cmii530FrontendMap)
result = UpdateCmiiImageTagFromNameTagMap(demo, cmii530FrontendMap)
utils.BeautifulPrint(result)
}
@@ -253,7 +253,7 @@ func TestUpdateCmiiDeploymentImageTag(t *testing.T) {
// compute the 20:00 target time
now := time.Now()
targetTime := time.Date(now.Year(), now.Month(), now.Day(), 14, 22, 00, 0, now.Location())
targetTime := time.Date(now.Year(), now.Month(), now.Day(), 16, 55, 00, 0, now.Location())
duration := time.Duration(0)
@@ -277,10 +277,11 @@ func TestUpdateCmiiDeploymentImageTag(t *testing.T) {
appNameTagMap := map[string]string{
//"cmii-uav-platform-dispatchh5": "5.6.0-062401",
//"cmii-uav-data-post-process": "5.6.0-062401",
"cmii-uav-industrial-portfolio": "5.6.0-071801",
//"cmii-uav-developer": "5.6.0-062701",
//"cmii-uav-industrial-portfolio": "5.6.0-071801",
"cmii-uav-platform": "5.7.0",
//"cmii-uav-brain": "5.5.0",
//"cmii-uav-platform": "5.6.0-071702",
//"cmii-uas-lifecycle": "5.6.0-30403-071801",
//"cmii-uas-lifecycle": "5.6.0-30403-071802",
}
for appName, newTag := range appNameTagMap {
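The hunk is cut off here; judging from targetTime and duration above, the omitted part presumably waits until the target wall-clock time before iterating over appNameTagMap. A self-contained sketch of that wait-until pattern (hypothetical helper, not the test's actual code):

package main

import (
    "fmt"
    "time"
)

// waitUntil sleeps until the given wall-clock time today, assuming the elided
// test code does something similar with targetTime before applying updates.
func waitUntil(hour, min int) {
    now := time.Now()
    target := time.Date(now.Year(), now.Month(), now.Day(), hour, min, 0, 0, now.Location())
    if d := target.Sub(now); d > 0 {
        fmt.Printf("waiting %s until %s\n", d, target.Format("15:04"))
        time.Sleep(d)
    }
}

func main() {
    waitUntil(16, 55)
    fmt.Println("applying tag updates now")
}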


@@ -504,6 +504,7 @@ func (op *CmiiK8sOperator) DeploymentUpdateTagByImageFullName(cmiiEnv, imageFull
return op.DeploymentUpdateTag(cmiiEnv, appName, newTag)
}
// DeploymentUpdateTag updates a Deployment's tag and returns true or false; it also updates IMAGE_VERSION and BIZ_GROUP
func (op *CmiiK8sOperator) DeploymentUpdateTag(cmiiEnv, appName, newTag string) bool {
if newTag == "" {
@@ -545,15 +546,15 @@ func (op *CmiiK8sOperator) DeploymentUpdateTag(cmiiEnv, appName, newTag string)
tagVersion = strings.Split(newTag, "-")[0]
}
envList := container.Env
for _, envVar := range envList {
for index, envVar := range envList {
if envVar.Name == "IMAGE_VERSION" {
envVar.Value = tagVersion
envList[index].Value = tagVersion
}
if envVar.Name == "BIZ_CONFIG_GROUP" {
envVar.Value = tagVersion
envList[index].Value = tagVersion
}
if envVar.Name == "SYS_CONFIG_GROUP" {
envVar.Value = tagVersion
envList[index].Value = tagVersion
}
}
log.DebugF("[DeploymentUpdateTag] - update env IMAGE_VERSION to [%s]", tagVersion)
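The switch from a value-only range loop to an index-based write is the real fix in this hunk: in Go, the loop variable envVar is a copy of the slice element, so assigning to envVar.Value never changes envList. A minimal self-contained illustration, using a local EnvVar struct instead of the Kubernetes corev1.EnvVar type:

package main

import "fmt"

// EnvVar is a stand-in for corev1.EnvVar, for illustration only.
type EnvVar struct {
    Name  string
    Value string
}

func main() {
    envList := []EnvVar{{Name: "IMAGE_VERSION", Value: "5.3.0"}}

    // Buggy: envVar is a copy, so the slice element is never modified.
    for _, envVar := range envList {
        if envVar.Name == "IMAGE_VERSION" {
            envVar.Value = "5.7.0"
        }
    }
    fmt.Println("after copy-based loop: ", envList[0].Value) // still 5.3.0

    // Fixed: write through the index, as the diff now does.
    for index, envVar := range envList {
        if envVar.Name == "IMAGE_VERSION" {
            envList[index].Value = "5.7.0"
        }
    }
    fmt.Println("after index-based loop:", envList[0].Value) // 5.7.0
}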


@@ -84,7 +84,7 @@ func TestCmiiK8sOperator_DeploymentScale(t *testing.T) {
func TestCmiiK8sOperator_DeploymentUpdateTag(t *testing.T) {
start := time.Now()
DefaultCmiiOperator.DeploymentUpdateTag("demo", "cmii-uav-platform", "5.2.0-011001")
DefaultCmiiOperator.DeploymentUpdateTag("demo", "cmii-uav-gateway", "5.7.0")
elapsed := time.Since(start).Milliseconds()
fmt.Printf("elapsed: %d ms\n", elapsed)
}

File diff suppressed because it is too large


@@ -1,336 +0,0 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-platform
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "platform",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.250.0.110:8888",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,450 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
type: ClusterIP
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: { }
containers:
- name: helm-mongo
image: 10.250.0.110:8033/cmii/mongo:5.0
resources: { }
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
type: ClusterIP
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: { }
containers:
- name: helm-mongo
image: 10.250.0.110:8033/cmii/mongo:5.0
resources: { }
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
type: ClusterIP
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: { }
containers:
- name: helm-mongo
image: 10.250.0.110:8033/cmii/mongo:5.0
resources: { }
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
type: ClusterIP
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: { }
containers:
- name: helm-mongo
image: 10.250.0.110:8033/cmii/mongo:5.0
resources: { }
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
type: ClusterIP
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: { }
containers:
- name: helm-mongo
image: 10.250.0.110:8033/cmii/mongo:5.0
resources: { }
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
type: ClusterIP
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: { }
containers:
- name: helm-mongo
image: 10.250.0.110:8033/cmii/mongo:5.0
resources: { }
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---

File diff suppressed because it is too large


@@ -1,756 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38989
- port: 9848
name: server12
targetPort: 9848
nodePort: 38912
- port: 9849
name: server23
targetPort: 9849
nodePort: 38923
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 5.5.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: { }
containers:
- name: nacos-server
image: 10.250.0.110:8033/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38989
- port: 9848
name: server12
targetPort: 9848
nodePort: 38912
- port: 9849
name: server23
targetPort: 9849
nodePort: 38923
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 5.5.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: { }
containers:
- name: nacos-server
image: 10.250.0.110:8033/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38989
- port: 9848
name: server12
targetPort: 9848
nodePort: 38912
- port: 9849
name: server23
targetPort: 9849
nodePort: 38923
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 5.5.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: { }
containers:
- name: nacos-server
image: 10.250.0.110:8033/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38989
- port: 9848
name: server12
targetPort: 9848
nodePort: 38912
- port: 9849
name: server23
targetPort: 9849
nodePort: 38923
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 5.5.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: { }
containers:
- name: nacos-server
image: 10.250.0.110:8033/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38989
- port: 9848
name: server12
targetPort: 9848
nodePort: 38912
- port: 9849
name: server23
targetPort: 9849
nodePort: 38923
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 5.5.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: { }
containers:
- name: nacos-server
image: 10.250.0.110:8033/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38989
- port: 9848
name: server12
targetPort: 9848
nodePort: 38912
- port: 9849
name: server23
targetPort: 9849
nodePort: 38923
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 5.5.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: { }
containers:
- name: nacos-server
image: 10.250.0.110:8033/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---


@@ -1,216 +0,0 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
containers:
- name: test-pod
image: 10.250.0.110:8033/cmii/busybox
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: test-claim # must match the PVC name
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
containers:
- name: test-pod
image: 10.250.0.110:8033/cmii/busybox
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: test-claim # must match the PVC name
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
containers:
- name: test-pod
image: 10.250.0.110:8033/cmii/busybox
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: test-claim # must match the PVC name
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
containers:
- name: test-pod
image: 10.250.0.110:8033/cmii/busybox
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: test-claim # must match the PVC name
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
containers:
- name: test-pod
image: 10.250.0.110:8033/cmii/busybox
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: test-claim # must match the PVC name
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
containers:
- name: test-pod
image: 10.250.0.110:8033/cmii/busybox
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: test-claim # must match the PVC name


@@ -1,672 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # set the namespace according to your environment; same below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [ "" ]
resources: [ "persistentvolumes" ]
verbs: [ "get", "list", "watch", "create", "delete" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumeclaims" ]
verbs: [ "get", "list", "watch", "update" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "update", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner config; parameters: archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # must match the namespace in the RBAC manifests
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 10.250.0.110
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 10.250.0.110
path: /var/lib/docker/nfs_data
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # set the namespace according to your environment; same below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [ "" ]
resources: [ "persistentvolumes" ]
verbs: [ "get", "list", "watch", "create", "delete" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumeclaims" ]
verbs: [ "get", "list", "watch", "update" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "update", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner config; parameters: archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # must match the namespace in the RBAC manifests
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 10.250.0.110
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 10.250.0.110
path: /var/lib/docker/nfs_data
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # set the namespace according to your environment; same below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [ "" ]
resources: [ "persistentvolumes" ]
verbs: [ "get", "list", "watch", "create", "delete" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumeclaims" ]
verbs: [ "get", "list", "watch", "update" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "update", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner config; parameters: archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # must match the namespace in the RBAC manifests
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 10.250.0.110
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 10.250.0.110
path: /var/lib/docker/nfs_data
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # set the namespace according to your environment; same below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [ "" ]
resources: [ "persistentvolumes" ]
verbs: [ "get", "list", "watch", "create", "delete" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumeclaims" ]
verbs: [ "get", "list", "watch", "update" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "update", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME environment variable in the provisioner Deployment
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # keep consistent with the namespace used in the RBAC manifests
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 10.250.0.110
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 10.250.0.110
path: /var/lib/docker/nfs_data
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # set the namespace for your environment; same for the resources below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [ "" ]
resources: [ "persistentvolumes" ]
verbs: [ "get", "list", "watch", "create", "delete" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumeclaims" ]
verbs: [ "get", "list", "watch", "update" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "update", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME environment variable in the provisioner Deployment
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # keep consistent with the namespace used in the RBAC manifests
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 10.250.0.110
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 10.250.0.110
path: /var/lib/docker/nfs_data
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # set the namespace for your environment; same for the resources below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [ "" ]
resources: [ "persistentvolumes" ]
verbs: [ "get", "list", "watch", "create", "delete" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumeclaims" ]
verbs: [ "get", "list", "watch", "update" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "update", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME environment variable in the provisioner Deployment
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # keep consistent with the namespace used in the RBAC manifests
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 10.250.0.110
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 10.250.0.110
path: /var/lib/docker/nfs_data

View File

@@ -1,456 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,499 +0,0 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: bjtg
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 30935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://10.250.0.110:8888;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
nodePort: 30935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30557
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
creationTimestamp: null
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: 10.250.0.110:8033/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 30935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 10.250.0.110
resources:
limits:
cpu: 1200m
memory: 6Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: bjtg/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: bjtg/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: 10.250.0.110:8033/cmii/cmii-srs-oss-adaptor:2023-SA
env:
- name: OSS_ENDPOINT
value: 'http://10.250.0.110:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 1200m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: bjtg/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
creationTimestamp: null
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: 10.250.0.110:8033/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/ping
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/ping
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 5.5.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: developer
password: N@cos14Good
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 5.5.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 5.5.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://10.250.0.110:30935'
rtsp: 'rtsp://10.250.0.110:30554'
srt: 'srt://10.250.0.110:30556'
flv: 'http://10.250.0.110:30500'
hls: 'http://10.250.0.110:30500'
rtc: 'webrtc://10.250.0.110:30557'
replay: 'https://10.250.0.110:30333'
minio:
endpoint: http://10.250.0.110:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls

View File

@@ -510,27 +510,6 @@ func CmiiImageMapFromGzipFolder(gzipFileFolder string) (cmiiImageVersionMap map[
return cmiiImageVersionMap
}
func FrontendBackendSrsImageMapFromCmiiImageMap(cmiiImageVersionMap map[string]string) (frontendImageVersionMap, backendImageVersionMap, srsImageVersionMap map[string]string) {
    frontendImageVersionMap = make(map[string]string)
    backendImageVersionMap = make(map[string]string)
    srsImageVersionMap = make(map[string]string)
    for imageName, imageTag := range cmiiImageVersionMap {
        if strings.Contains(imageName, "platform") {
            frontendImageVersionMap[imageName] = imageTag
        } else if strings.Contains(imageName, "srs") {
            srsImageVersionMap[imageName] = imageTag
        } else if strings.Contains(imageName, "operator") {
            srsImageVersionMap[imageName] = imageTag
        } else {
            backendImageVersionMap[imageName] = imageTag
        }
    }
    return frontendImageVersionMap, backendImageVersionMap, srsImageVersionMap
}
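// Example (illustrative, not part of the original file): how the classifier above
// splits a name -> tag map into frontend / backend / srs groups. The sample image
// names are taken from the image lists elsewhere in this commit.
//
//    func ExampleFrontendBackendSrsSplit() {
//        images := map[string]string{
//            "cmii-uav-platform":  "5.7.0",    // contains "platform" -> frontend
//            "cmii-uav-gateway":   "5.7.0",    // no keyword          -> backend
//            "cmii-live-operator": "5.2.0",    // contains "operator" -> srs
//            "srs":                "v5.0.195", // contains "srs"      -> srs
//        }
//        frontend, backend, srs := FrontendBackendSrsImageMapFromCmiiImageMap(images)
//        fmt.Println(len(frontend), len(backend), len(srs)) // 1 1 2
//    }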
// GenerateCmiiTagVersionImageMap builds the image tag maps for a specific version
func GenerateCmiiTagVersionImageMap(specificTag string) (backendMap, frontendMap, srsMap map[string]string) {
matched, _ := regexp.MatchString(`^\d+\.\d+\.\d+$`, specificTag)

View File

@@ -9,7 +9,6 @@ import (
"wdd.io/agent-common/image"
"wdd.io/agent-common/utils"
"wdd.io/agent-deploy/d_app"
"wdd.io/agent-operator/real_project/zjjt"
)
func TestGetRunningContainer(t *testing.T) {
@@ -205,13 +204,6 @@ func TestConvertCmiiImageMapFromGzipFolder(t *testing.T) {
utils.BeautifulPrint(versionMap)
}
func TestFrontendBackendImageMapFromCmiiImageMap(t *testing.T) {
frontendImageVersionMap, backendImageVersionMap, _ := FrontendBackendSrsImageMapFromCmiiImageMap(zjjt.CmiiImageMap)
utils.BeautifulPrint(frontendImageVersionMap)
utils.BeautifulPrint(backendImageVersionMap)
}
func TestImageNameToTargetImageFullName(t *testing.T) {
AllCmiiImageTagList := []string{

View File

@@ -135,8 +135,8 @@ func TestHarborOperator_ArtifactDeleteOne(t *testing.T) {
}
func TestHarborOperator_CmiiTagFilter(t *testing.T) {
TestHarborOperator_BuildOperator(t)
imageMap := DefaultHarborOperator.CmiiTagFilter("5")
TestHarborOperator_BuildOperator_CMII(t)
imageMap := DefaultHarborOperator.CmiiTagFilter("5.7")
utils.BeautifulPrint(imageMap)
}

View File

@@ -111,3 +111,99 @@
2024-07-17-17-45-09 uavcloud-demo cmii-uas-lifecycle 5.6.0 5.6.0-30403-071701
2024-07-17-17-48-00 uavcloud-demo cmii-uav-platform 5.6.0-071701 5.6.0-071702
2024-07-18-10-05-00 uavcloud-demo cmii-uas-lifecycle 5.6.0-30403-071701 5.6.0-30403-071801
2024-07-18-17-17-58 uavcloud-demo cmii-uav-emergency 5.6.0-0704 5.7.0
2024-07-18-17-17-59 uavcloud-demo cmii-uav-gis-server 5.6.0 5.7.0
2024-07-18-17-18-00 uavcloud-demo cmii-uav-sense-adapter 5.6.0-0716 5.7.0
2024-07-18-17-18-02 uavcloud-demo cmii-open-gateway 5.6.0 5.7.0
2024-07-18-17-18-03 uavcloud-demo cmii-uav-cloud-live 5.6.0 5.7.0
2024-07-18-17-18-04 uavcloud-demo cmii-uav-mission 5.5.0-30015-061801 5.7.0
2024-07-18-17-18-06 uavcloud-demo cmii-uav-mqtthandler 5.6.0-30067-071604 5.7.0
2024-07-18-17-18-07 uavcloud-demo cmii-uav-alarm 5.6.0 5.7.0
2024-07-18-17-18-08 uavcloud-demo cmii-uav-material-warehouse 5.6.0-062602 5.7.0
2024-07-18-17-18-10 uavcloud-demo cmii-uav-integration 5.7.0-30015-29835-071601 5.7.0
2024-07-18-17-18-11 uavcloud-demo cmii-suav-supervision 5.6.0 5.7.0
2024-07-18-17-18-12 uavcloud-demo cmii-uav-airspace 5.6.0-0704 5.7.0
2024-07-18-17-18-14 uavcloud-demo cmii-uav-logger 5.6.0 5.7.0
2024-07-18-17-18-16 uavcloud-demo cmii-uav-threedsimulation 5.5.0 5.7.0
2024-07-18-17-18-18 uavcloud-demo cmii-admin-data 5.6.0 5.7.0
2024-07-18-17-18-19 uavcloud-demo cmii-uav-industrial-portfolio 5.6.0-071701 5.7.0
2024-07-18-17-18-20 uavcloud-demo cmii-uav-process 5.6.0-060601 5.7.0
2024-07-18-17-18-22 uavcloud-demo cmii-uav-surveillance 5.6.0-30015-070801 5.7.0
2024-07-18-17-18-23 uavcloud-demo cmii-uav-user 5.6.0-0704 5.7.0
2024-07-18-17-18-24 uavcloud-demo cmii-uav-developer 5.6.0-0708 5.7.0
2024-07-18-17-18-26 uavcloud-demo cmii-uav-data-post-process 5.6.0-062401 5.7.0
2024-07-18-17-18-27 uavcloud-demo cmii-admin-gateway 5.6.0 5.7.0
2024-07-18-17-18-29 uavcloud-demo cmii-uav-gateway 5.6.0-061202 5.7.0
2024-07-18-17-18-30 uavcloud-demo cmii-uav-waypoint 5.6.0 5.7.0
2024-07-18-17-18-31 uavcloud-demo cmii-admin-user 5.6.0 5.7.0
2024-07-18-17-18-33 uavcloud-demo cmii-uav-cms 5.5.0 5.7.0
2024-07-18-17-18-34 uavcloud-demo cmii-uav-device 5.6.0-0715 5.7.0
2024-07-18-17-18-36 uavcloud-demo cmii-uav-notice 5.6.0 5.7.0
2024-07-18-17-18-37 uavcloud-demo cmii-uav-oauth 5.6.0-0704 5.7.0
2024-07-18-17-18-38 uavcloud-demo cmii-uav-tower 5.6.0-062601 5.7.0
2024-07-18-17-18-40 uavcloud-demo cmii-uav-multilink 5.5.0 5.7.0
2024-07-18-17-18-41 uavcloud-demo cmii-uav-brain 5.5.0 5.7.0
2024-07-18-17-20-49 uavcloud-demo cmii-suav-platform-supervisionh5 5.6.0 5.7.0
2024-07-18-17-20-51 uavcloud-demo cmii-uav-platform-ai-brain 5.6.0 5.7.0
2024-07-18-17-20-52 uavcloud-demo cmii-uav-platform-cms-portal 5.6.0 5.7.0
2024-07-18-17-20-53 uavcloud-demo cmii-uav-platform-open 5.6.0-0704 5.7.0
2024-07-18-17-20-55 uavcloud-demo cmii-uav-platform-share 5.6.0 5.7.0
2024-07-18-17-20-56 uavcloud-demo cmii-suav-platform-supervision 5.6.0-0708 5.7.0
2024-07-18-17-20-57 uavcloud-demo cmii-uav-platform 5.6.0-071702 5.7.0
2024-07-18-17-20-58 uavcloud-demo cmii-uav-platform-armypeople 5.6.0-28028-071102 5.7.0
2024-07-18-17-21-00 uavcloud-demo cmii-uav-platform-media 5.6.0-0710 5.7.0
2024-07-18-17-21-02 uavcloud-demo cmii-uav-platform-mws 5.6.0 5.7.0
2024-07-18-17-21-04 uavcloud-demo cmii-uav-platform-oms 5.6.0 5.7.0
2024-07-18-17-21-05 uavcloud-demo cmii-uav-platform-securityh5 5.6.0 5.7.0
2024-07-18-17-26-40 uavcloud-demo cmii-uav-brain 5.7.0 5.5.0
2024-07-18-17-28-26 uavcloud-demo cmii-uav-multilink 5.7.0 5.5.0
2024-07-18-17-35-01 uavcloud-demo cmii-uas-lifecycle 5.6.0-30403-071801 5.6.0-30403-071802
2024-07-18-17-40-02 uavcloud-demo cmii-uas-lifecycle 5.6.0-30403-071802 5.6.0-30403-071801
2024-07-18-18-24-25 uavcloud-demo cmii-admin-data 5.7.0
2024-07-18-18-24-26 uavcloud-demo cmii-uav-gateway 5.7.0
2024-07-18-18-24-27 uavcloud-demo cmii-uav-tower 5.7.0
2024-07-18-18-24-29 uavcloud-demo cmii-uav-user 5.7.0
2024-07-18-18-24-30 uavcloud-demo cmii-open-gateway 5.7.0
2024-07-18-18-24-31 uavcloud-demo cmii-uav-data-post-process 5.7.0
2024-07-18-18-24-32 uavcloud-demo cmii-uav-oauth 5.7.0
2024-07-18-18-24-34 uavcloud-demo cmii-uav-sense-adapter 5.7.0
2024-07-18-18-24-35 uavcloud-demo cmii-admin-gateway 5.7.0
2024-07-18-18-24-36 uavcloud-demo cmii-admin-user 5.7.0
2024-07-18-18-24-38 uavcloud-demo cmii-uav-alarm 5.7.0
2024-07-18-18-24-40 uavcloud-demo cmii-uav-mission 5.7.0
2024-07-18-18-24-41 uavcloud-demo cmii-uav-notice 5.7.0
2024-07-18-18-24-42 uavcloud-demo cmii-uav-multilink 5.7.0
2024-07-18-18-24-43 uavcloud-demo cmii-uav-brain 5.7.0
2024-07-18-18-24-45 uavcloud-demo cmii-uav-developer 5.7.0
2024-07-18-18-24-46 uavcloud-demo cmii-uav-mqtthandler 5.7.0
2024-07-18-18-24-48 uavcloud-demo cmii-uav-process 5.7.0
2024-07-18-18-24-49 uavcloud-demo cmii-uav-threedsimulation 5.7.0
2024-07-18-18-24-50 uavcloud-demo cmii-uav-waypoint 5.7.0
2024-07-18-18-24-54 uavcloud-demo cmii-uav-airspace 5.7.0
2024-07-18-18-24-56 uavcloud-demo cmii-uav-material-warehouse 5.7.0
2024-07-18-18-24-58 uavcloud-demo cmii-suav-supervision 5.7.0
2024-07-18-18-25-00 uavcloud-demo cmii-uav-cms 5.7.0
2024-07-18-18-25-03 uavcloud-demo cmii-uav-emergency 5.7.0
2024-07-18-18-25-08 uavcloud-demo cmii-uav-gis-server 5.7.0
2024-07-18-18-25-14 uavcloud-demo cmii-uav-surveillance 5.7.0
2024-07-18-18-25-16 uavcloud-demo cmii-uav-cloud-live 5.7.0
2024-07-18-18-25-18 uavcloud-demo cmii-uav-device 5.7.0
2024-07-18-18-25-20 uavcloud-demo cmii-uav-industrial-portfolio 5.7.0
2024-07-18-18-25-21 uavcloud-demo cmii-uav-integration 5.7.0
2024-07-18-18-25-23 uavcloud-demo cmii-uav-logger 5.7.0
2024-07-18-18-25-25 uavcloud-demo cmii-uav-platform-oms 5.7.0
2024-07-18-18-25-27 uavcloud-demo cmii-uav-platform-open 5.7.0
2024-07-18-18-25-28 uavcloud-demo cmii-uav-platform-securityh5 5.7.0
2024-07-18-18-25-29 uavcloud-demo cmii-suav-platform-supervision 5.7.0
2024-07-18-18-25-31 uavcloud-demo cmii-uav-platform-ai-brain 5.7.0
2024-07-18-18-25-32 uavcloud-demo cmii-uav-platform-armypeople 5.7.0
2024-07-18-18-25-34 uavcloud-demo cmii-uav-platform-media 5.7.0
2024-07-18-18-25-37 uavcloud-demo cmii-uav-platform-mws 5.7.0
2024-07-18-18-25-39 uavcloud-demo cmii-suav-platform-supervisionh5 5.7.0
2024-07-18-18-25-40 uavcloud-demo cmii-uav-platform 5.7.0
2024-07-18-18-25-42 uavcloud-demo cmii-uav-platform-cms-portal 5.7.0
2024-07-18-18-25-43 uavcloud-demo cmii-uav-platform-share 5.7.0
2024-07-18-18-28-00 uavcloud-demo cmii-uav-multilink 5.5.0
2024-07-18-18-30-21 uavcloud-demo cmii-uav-brain 5.5.0
2024-07-19-09-29-48 uavcloud-demo cmii-uav-platform-armypeople 5.7.0 5.7.0-29668-071901
2024-07-19-09-54-00 uavcloud-demo cmii-uav-platform-armypeople 5.7.0-29668-071901 5.7.0
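Each row above is whitespace-separated: timestamp, namespace, image name, and one or two tags; when only one tag is present, no previous tag was recorded. A minimal Go sketch for parsing that layout (the struct and function names below are illustrative, not from the repo):

package main

import (
    "fmt"
    "strings"
)

// upgradeRecord mirrors one row of the upgrade log above.
type upgradeRecord struct {
    Time, Namespace, Image, OldTag, NewTag string
}

// parseUpgradeLine splits a whitespace-separated log row; it returns false
// when the row has fewer than the four mandatory columns.
func parseUpgradeLine(line string) (upgradeRecord, bool) {
    fields := strings.Fields(line)
    if len(fields) < 4 {
        return upgradeRecord{}, false
    }
    rec := upgradeRecord{Time: fields[0], Namespace: fields[1], Image: fields[2]}
    if len(fields) >= 5 {
        rec.OldTag, rec.NewTag = fields[3], fields[4]
    } else {
        rec.NewTag = fields[3] // no previous tag was recorded
    }
    return rec, true
}

func main() {
    rec, _ := parseUpgradeLine("2024-07-18-17-18-07 uavcloud-demo cmii-uav-alarm 5.6.0 5.7.0")
    fmt.Printf("%+v\n", rec)
}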

View File

@@ -1,140 +0,0 @@
package bgtg
var AllCmiiImageTagList = []string{
"cmii-uav-tower:5.4.0-0319",
"cmii-uav-platform-logistics:5.4.0",
"cmii-uav-platform-qinghaitourism:4.1.0-21377-0508",
"cmii-uav-platform-securityh5:5.4.0",
"cmii-uav-platform:5.4.0-25263-041102",
"cmii-uav-platform-ai-brain:5.4.0",
"cmii-uav-emergency:5.3.0",
"cmii-uav-kpi-monitor:5.4.0",
"cmii-uav-platform-splice:5.4.0-040301",
"cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427",
"cmii-live-operator:5.2.0",
"cmii-uav-gateway:5.4.0",
"cmii-uav-platform-security:4.1.6",
"cmii-uav-integration:5.4.0-25916",
"cmii-uav-notice:5.4.0",
"cmii-uav-platform-open:5.4.0",
"cmii-srs-oss-adaptor:2023-SA",
"cmii-admin-gateway:5.4.0",
"cmii-uav-process:5.4.0-0410",
"cmii-suav-supervision:5.4.0-032501",
"cmii-uav-platform-cms-portal:5.4.0",
"cmii-uav-platform-multiterminal:5.4.0",
"cmii-admin-data:5.4.0-0403",
"cmii-uav-cloud-live:5.4.0",
"cmii-uav-grid-datasource:5.2.0-24810",
"cmii-uav-platform-qingdao:4.1.6-24238-qingdao",
"cmii-admin-user:5.4.0",
"cmii-uav-industrial-portfolio:5.4.0-28027-041102",
"cmii-uav-alarm:5.4.0-0409",
"cmii-uav-clusters:5.2.0",
"cmii-uav-platform-oms:5.4.0",
"cmii-uav-platform-hljtt:5.3.0-hjltt",
"cmii-uav-platform-mws:5.4.0",
"cmii-uav-autowaypoint:4.1.6-cm",
"cmii-uav-grid-manage:5.1.0",
"cmii-uav-platform-share:5.4.0",
"cmii-uav-cms:5.3.0",
"cmii-uav-oauth:5.4.0-032901",
"cmii-open-gateway:5.4.0",
"cmii-uav-data-post-process:5.4.0",
"cmii-uav-multilink:5.4.0-032701",
"cmii-uav-platform-media:5.4.0",
"cmii-uav-platform-visualization:5.2.0",
"cmii-uav-platform-emergency-rescue:5.2.0",
"cmii-app-release:4.2.0-validation",
"cmii-uav-device:5.4.0-28028-0409",
"cmii-uav-gis-server:5.4.0",
"cmii-uav-brain:5.4.0",
"cmii-uav-depotautoreturn:5.4.0",
"cmii-uav-threedsimulation:5.1.0",
"cmii-uav-grid-engine:5.1.0",
"cmii-uav-developer:5.4.0-040701",
"cmii-uav-waypoint:5.4.0-032901",
"cmii-uav-platform-base:5.4.0",
"cmii-uav-platform-threedsimulation:5.2.0-21392",
"cmii-uav-platform-detection:5.4.0",
"cmii-uav-logger:5.4.0-0319",
"cmii-uav-platform-seniclive:5.2.0",
"cmii-suav-platform-supervisionh5:5.4.0",
"cmii-uav-user:5.4.0",
"cmii-uav-surveillance:5.4.0-28028-0409",
"cmii-uav-mission:5.4.0-28028-041006",
"cmii-uav-mqtthandler:5.4.0-25916-041001",
"cmii-uav-material-warehouse:5.4.0-0407",
"cmii-uav-platform-armypeople:5.4.0-041201",
"cmii-suav-platform-supervision:5.4.0",
"cmii-uav-airspace:5.4.0-0402",
}
var AllCMiiImageFullNameList560 = []string{
"harbor.cdcyy.com.cn/cmii/cmii-uav-integration:5.7.0-30015-29835-071601",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mission:5.5.0-30015-061801",
"harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:5.6.0-0716",
"harbor.cdcyy.com.cn/cmii/cmii-admin-user:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:5.6.0-061202",
"harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:5.6.0-062401",
"harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:5.6.0-062602",
"harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-tower:5.6.0-062601",
"harbor.cdcyy.com.cn/cmii/cmii-uav-user:5.6.0-0704",
"harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-developer:5.6.0-0708",
"harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:5.6.0-0704",
"harbor.cdcyy.com.cn/cmii/cmii-uav-notice:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-process:5.6.0-060601",
"harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:5.6.0-30015-070801",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-device:5.6.0-0715",
"harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:5.6.0-071601",
"harbor.cdcyy.com.cn/cmii/cmii-open-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-brain:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cms:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:5.6.0-0704",
"harbor.cdcyy.com.cn/cmii/cmii-uav-logger:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-iam-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:5.2.0-24810",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:5.6.0-30067-071604",
"harbor.cdcyy.com.cn/cmii/cmii-admin-data:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:5.6.0-0704",
"harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:5.6.0-070401",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:5.6.0-0710",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-dispatchh5:5.6.0-0708",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:5.6.0-0704",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.6.0-29267-0717",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:5.6.0-0708",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:5.6.0-0709",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:5.6.0-0709",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:4.1.0-21377-0508",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:5.6.0-28028-071102",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
"harbor.cdcyy.com.cn/cmii/ossrs/srs:v5.0.195",
"harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA",
}
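// Illustrative helper (not from the original file): reduces the full image names above,
// e.g. "harbor.cdcyy.com.cn/cmii/cmii-admin-user:5.6.0", to a short-name -> tag map.
// Assumes the last ":" separates the tag and the last "/" separates the image name;
// it would need `import "strings"` added to this file.
func imageMapFromFullNames(fullNames []string) map[string]string {
    out := make(map[string]string, len(fullNames))
    for _, full := range fullNames {
        colon := strings.LastIndex(full, ":")
        if colon < 0 {
            continue // entry has no tag, skip it
        }
        name, tag := full[:colon], full[colon+1:]
        if slash := strings.LastIndex(name, "/"); slash >= 0 {
            name = name[slash+1:]
        }
        out[name] = tag
    }
    return out
}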

View File

@@ -1,114 +0,0 @@
#!/bin/bash
local_host_ip=20.4.16.200
# all_host_ip_list=(20.4.13.81 20.4.13.140 20.4.13.92 20.4.13.80)
all_host_ip_list=(20.4.13.80)
pass=V2ryStr@ngPss
copy_ssh_key_to_master(){
scp /root/.ssh/id_rsa root@20.4.13.81:/root/.ssh/id_rsa
scp /root/.ssh/id_rsa.pub root@20.4.13.81:/root/.ssh/id_rsa.pub
}
copy_ssh_key_to_master
install_nfs_server_suffix(){
ssh -i /root/.ssh/id_rsa root@20.4.13.81 "systemctl start rpcbind && systemctl enable rpcbind && systemctl start nfs-server && systemctl enable nfs-server"
ssh -i /root/.ssh/id_rsa root@20.4.13.81 "mkdir -p /var/lib/docker/nfs_data && chmod 777 /var/lib/docker/nfs_data"
ssh -i /root/.ssh/id_rsa root@20.4.13.81 "echo \"/var/lib/docker/nfs_data *(rw,no_root_squash,no_all_squash,sync)\" >> /etc/exports"
ssh -i /root/.ssh/id_rsa root@20.4.13.81 "systemctl restart rpcbind && systemctl restart nfs-server"
ssh -i /root/.ssh/id_rsa root@20.4.13.81 "rpcinfo -p localhost"
}
# install_nfs_server_suffix
install_all_demand_softwares(){
local host
local dep_file_list=(tar-1.32-4.oe2003sp4.x86_64.rpm common_tool-openEuler-20.03-LTS-SP4.tar.gz nfs_utils-openEuler-20.03-LTS-SP4.tar.gz nginx-openEuler-20.03-LTS-SP4.tar.gz ntp-openEuler-20.03-LTS-SP4.tar.gz)
for host in ${all_host_ip_list[@]}
do
echo "current host is ${host}"
ssh -i /root/.ssh/id_rsa root@${host} "echo yes"
ssh -i /root/.ssh/id_rsa root@${host} "systemctl start rpcbind && systemctl enable rpcbind && systemctl start ntpd && systemctl enable ntpd"
# ssh -i /root/.ssh/id_rsa root@${host} "mkdir -p /root/wdd/dep/"
# for dep in ${dep_file_list[@]}
# do
# echo "dep file is ${dep}"
#
# ssh -i /root/.ssh/id_rsa root@${host} "wget http://20.4.16.200:9000/octopus/euler/${dep} -O /root/wdd/dep/${dep}"
# ssh -i /root/.ssh/id_rsa root@${host} "rpm -ivh /root/wdd/dep/tar*.rpm"
# ssh -i /root/.ssh/id_rsa root@${host} "cd /root/wdd/dep/ && tar -zvxf common_tool-openEuler-20.03-LTS-SP4.tar.gz && cd ./common_tool && rpm -ivh --force ./*.rpm"
# ssh -i /root/.ssh/id_rsa root@${host} "cd /root/wdd/dep/ && tar -zvxf nfs_utils-openEuler-20.03-LTS-SP4.tar.gz && cd ./nfs_utils && rpm -ivh --force ./*.rpm"
# ssh -i /root/.ssh/id_rsa root@${host} "cd /root/wdd/dep/ && tar -zvxf ntp-openEuler-20.03-LTS-SP4.tar.gz && cd ./ntp && rpm -ivh --force ./*.rpm"
#
# done
echo ""
done
}
# install_all_demand_softwares
test_base_command_exits() {
local base_command_list=(ifconfig mtr vgdisplay nslookup vim htop tar unzip iftop curl wget netstat git zsh)
local command
for command in "${base_command_list[@]}"; do
if command -v "$command" &>/dev/null; then
echo "$command exists"
else
echo "ERROR $command does not exist!"
fi
echo ""
done
}
test_service_exists(){
local base_service_list=(ntpd chronyd nginx nfs-server rpcbind docker)
local service
for service in "${base_service_list[@]}"; do
if ! systemctl list-unit-files | grep "$service.service"; then
echo "ERROR $service.service does not exist!"
fi
echo ""
done
}
# test_base_command_exits
# test_service_exists
change_host_name(){
hostnamectl set-hostname master-node
}
install_ssh_key(){
echo "" >> /root/.ssh/authorized_keys
echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDL9S6CSAjTFe2fy4bAIfqu90ft6E+GBRvS59kno6LDeAbqUQNYf9hEnIg07Ee/x5DlnYE0S3Ykv3WCHuVyBH2zANnC0P87SqphDGmoqdqF3r6uDaCr4lBsUqEai9X2q6dyjZj6ym+r4zQhMApNDzbhcyfKQ54tKFylGIdx6siyktuU/VbOzWc6G8r+BfFsQpMCA1ihmCY1jGjsKPqFlZGLeTrlBb1Zk0OV+GtDhlf/t0cd0kRPJoydm2juTXrZO+tFmf9turfKZsBnRYKtQBLJG5mF1hsjIqo8DHr+PUL2wRrSxEhGTZiJL4rNJo/kHhKXXsomc5RM/AnfgAfxrLlH zeasl@DESKTOP-K2F9GG3 ">> /root/.ssh/authorized_keys
echo "" >> /root/.ssh/authorized_keys
}
install_octopus_server_offline(){
bash <(curl -sL http://20.4.16.200:9000/octopus/init-script-wdd.sh) --url http://20.4.16.200:9000/octopus --agent-install --offline
bash <(curl -sL http://20.4.16.200:9000/octopus/init-script-wdd.sh) --url http://20.4.16.200:9000/octopus --agent-remove --offline
cp /etc/ssh/sshd_config /etc/ssh/sshd_config_back
sed -i "s/StrictModes yes/StrictModes no/g" /etc/ssh/sshd_config
sed -i "s/AllowTcpForwarding no/AllowTcpForwarding yes/g" /etc/ssh/sshd_config
sed -i "s/AllowAgentForwarding no/AllowAgentForwarding yes/g" /etc/ssh/sshd_config
sed -i "s/PermitTunnel no/PermitTunnel yes/g" /etc/ssh/sshd_config
systemctl restart sshd
}
# reference data: host IP, hostname, machine-id
machinId(){
cat <<'EOF'
20.4.13.81 Chongqing-amd64-01 354d6db5354d6db5354d6db5354d6db5
20.4.13.140 Chongqing-amd64-02 2a216db5354d6db5354d6db5354d6db5
20.4.13.92 Chongqing-amd64-03 3ca26db5354d6db5354d6db5354d6db5
20.4.13.80 Chongqing-amd64-04 4ea1d6db5354d6db5354d6db5354d6db
EOF
}

View File

@@ -1,48 +0,0 @@
#!/bin/bash
mount_disk_to_var(){
echo ""
echo ""
echo ""
echo "-----------------------------------------------------------------------"
local VG_NAME=datavg
local disk_name=/dev/vdb
local mount_dir=/var/lib/docker
echo "n
p
t
8e
w
" | fdisk ${disk_name}
partprobe
# if a volume group already exists, extend it directly instead
# vgextend /dev/mapper/centos /dev/vda3
vgcreate ${VG_NAME} ${disk_name}1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# adjust the size to the actual environment
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
# resize2fs /dev/mapper/${VG_NAME}-lvdata
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
mkdir -p /var/lib/docker
local selffstab="/dev/mapper/${VG_NAME}-lvdata ${mount_dir} xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
xfs_growfs /dev/mapper/${VG_NAME}-lvdata
echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"
}
mount_disk_to_var

View File

@@ -1,274 +0,0 @@
version: '2.3'
services:
log:
image: goharbor/harbor-log:v2.9.0
container_name: harbor-log
extra_hosts:
- "harbor.wdd.io:20.4.16.200"
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- DAC_OVERRIDE
- SETGID
- SETUID
volumes:
- /var/log/harbor/:/var/log/docker/:z
- type: bind
source: ./common/config/log/logrotate.conf
target: /etc/logrotate.d/logrotate.conf
- type: bind
source: ./common/config/log/rsyslog_docker.conf
target: /etc/rsyslog.d/rsyslog_docker.conf
ports:
- 127.0.0.1:1514:10514
networks:
- harbor
registry:
image: goharbor/registry-photon:v2.9.0
container_name: registry
extra_hosts:
- "harbor.wdd.io:20.4.16.200"
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
volumes:
- /var/lib/docker/harbor-data/registry:/storage:z
- ./common/config/registry/:/etc/registry/:z
- type: bind
source: /var/lib/docker/harbor-data/secret/registry/root.crt
target: /etc/registry/root.crt
- type: bind
source: ./common/config/shared/trust-certificates
target: /harbor_cust_cert
networks:
- harbor
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "registry"
registryctl:
image: goharbor/harbor-registryctl:v2.9.0
container_name: registryctl
extra_hosts:
- "harbor.wdd.io:20.4.16.200"
env_file:
- ./common/config/registryctl/env
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
volumes:
- /var/lib/docker/harbor-data/registry:/storage:z
- ./common/config/registry/:/etc/registry/:z
- type: bind
source: ./common/config/registryctl/config.yml
target: /etc/registryctl/config.yml
- type: bind
source: ./common/config/shared/trust-certificates
target: /harbor_cust_cert
networks:
- harbor
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "registryctl"
postgresql:
image: goharbor/harbor-db:v2.9.0
container_name: harbor-db
extra_hosts:
- "harbor.wdd.io:20.4.16.200"
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- DAC_OVERRIDE
- SETGID
- SETUID
volumes:
- /var/lib/docker/harbor-data/database:/var/lib/postgresql/data:z
networks:
harbor:
env_file:
- ./common/config/db/env
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "postgresql"
shm_size: '1gb'
core:
image: goharbor/harbor-core:v2.9.0
container_name: harbor-core
extra_hosts:
- "harbor.wdd.io:20.4.16.200"
env_file:
- ./common/config/core/env
restart: always
cap_drop:
- ALL
cap_add:
- SETGID
- SETUID
volumes:
- /var/lib/docker/harbor-data/ca_download/:/etc/core/ca/:z
- /var/lib/docker/harbor-data/:/data/:z
- ./common/config/core/certificates/:/etc/core/certificates/:z
- type: bind
source: ./common/config/core/app.conf
target: /etc/core/app.conf
- type: bind
source: /var/lib/docker/harbor-data/secret/core/private_key.pem
target: /etc/core/private_key.pem
- type: bind
source: /var/lib/docker/harbor-data/secret/keys/secretkey
target: /etc/core/key
- type: bind
source: ./common/config/shared/trust-certificates
target: /harbor_cust_cert
networks:
harbor:
depends_on:
- log
- registry
- redis
- postgresql
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "core"
portal:
image: goharbor/harbor-portal:v2.9.0
container_name: harbor-portal
extra_hosts:
- "harbor.wdd.io:20.4.16.200"
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
- NET_BIND_SERVICE
volumes:
- type: bind
source: ./common/config/portal/nginx.conf
target: /etc/nginx/nginx.conf
networks:
- harbor
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "portal"
jobservice:
image: goharbor/harbor-jobservice:v2.9.0
container_name: harbor-jobservice
extra_hosts:
- "harbor.wdd.io:20.4.16.200"
env_file:
- ./common/config/jobservice/env
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
volumes:
- /var/lib/docker/harbor-data/job_logs:/var/log/jobs:z
- type: bind
source: ./common/config/jobservice/config.yml
target: /etc/jobservice/config.yml
- type: bind
source: ./common/config/shared/trust-certificates
target: /harbor_cust_cert
networks:
- harbor
depends_on:
- core
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "jobservice"
redis:
image: goharbor/redis-photon:v2.9.0
container_name: redis
extra_hosts:
- "harbor.wdd.io:20.4.16.200"
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
volumes:
- /var/lib/docker/harbor-data/redis:/var/lib/redis
networks:
harbor:
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "redis"
proxy:
image: goharbor/nginx-photon:v2.9.0
container_name: nginx
extra_hosts:
- "harbor.wdd.io:20.4.16.200"
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
- NET_BIND_SERVICE
volumes:
- ./common/config/nginx:/etc/nginx:z
- type: bind
source: ./common/config/shared/trust-certificates
target: /harbor_cust_cert
networks:
- harbor
ports:
- 8033:8080
depends_on:
- registry
- core
- portal
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "proxy"
networks:
harbor:
external: false

View File

@@ -1,180 +0,0 @@
nodes:
- address: 20.4.13.81
user: root
role:
- controlplane
- etcd
- worker
internal_address: 20.4.13.81
labels:
ingress-deploy: true
- address: 20.4.13.140
user: root
role:
- worker
internal_address: 20.4.13.140
- address: 20.4.13.92
user: root
role:
- worker
internal_address: 20.4.13.92
labels:
mysql-deploy: 'true'
authentication:
strategy: x509
sans:
- "20.4.13.81"
private_registries:
- url: 20.4.13.81:8033 # private image registry address
user: admin
password: "V2ryStr@ngPss"
is_default: true
##############################################################################
# defaults to false; if set to true, RKE will not raise an error when an unsupported Docker version is detected
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
#ssh_key_path: /root/.ssh/id_ed25519
ssh_key_path: /root/.ssh/id_rsa
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
etcd:
backup_config:
enabled: false
interval_hours: 72
retention: 3
safe_timestamp: false
timeout: 300
creation: 12h
extra_args:
election-timeout: 5000
heartbeat-interval: 500
gid: 0
retention: 72h
snapshot: false
uid: 0
kube-api:
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-controller
service_cluster_ip_range: 172.24.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
always_pull_images: true
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Enable audit log to stdout
audit-log-path: "-"
# Increase number of delete workers
delete-collection-workers: 3
# Set the level of log output to warning-level
v: 1
kube-controller:
# CIDR pool used to assign IP addresses to pods in the cluster
cluster_cidr: 172.28.0.0/16
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-api
service_cluster_ip_range: 172.24.0.0/16
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Set the level of log output to debug-level
v: 1
# Enable RotateKubeletServerCertificate feature gate
feature-gates: RotateKubeletServerCertificate=true
# Enable TLS Certificates management
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
kubelet:
# Base domain for the cluster
cluster_domain: cluster.local
# IP address for the DNS service endpoint
cluster_dns_server: 172.24.0.10
# Fail if swap is on
fail_swap_on: false
# Set max pods to 122 instead of default 110
extra_binds:
- "/data/minio-pv:/hostStorage" # do not modify; added for the MinIO PV
extra_args:
max-pods: 122
# Optionally define additional volume binds to a service
scheduler:
extra_args:
# Set the level of log output to warning-level
v: 0
kubeproxy:
extra_args:
# Set the level of log output to warning-level
v: 1
authorization:
mode: rbac
addon_job_timeout: 30
# Specify network plugin-in (canal, calico, flannel, weave, or none)
network:
options:
flannel_backend_type: vxlan
flannel_iface: ens3
flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
plugin: flannel
# Specify DNS provider (coredns or kube-dns)
dns:
provider: coredns
nodelocal: { }
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
maxSurge: 15%
linear_autoscaler_params:
cores_per_replica: 0.34
nodes_per_replica: 4
prevent_single_point_failure: true
min: 2
max: 3
# Specify monitoring provider (metrics-server)
monitoring:
provider: metrics-server
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 8
ingress:
provider: nginx
default_backend: true
http_port: 0
https_port: 0
extra_envs:
- name: TZ
value: Asia/Shanghai
node_selector:
ingress-deploy: true
options:
use-forwarded-headers: "true"

View File

@@ -1,45 +0,0 @@
package main
//
//import (
// "wdd.io/agent-common/utils"
// cmii_operator "wdd.io/agent-operator"
//)
//
//var realConfig = `apiVersion: v1
//clusters:
//- cluster:
// certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeE1ERXhPREEyTURZeU5Gb1hEVE14TURFeE5qQTJNRFl5TkZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS2ZNCjFjTjBNUnhUTkRGdEZxcnRIZ0RPM29SV0dicmVob3VFcDJ3VUVRbU8yRUFyZDdkMUFReTJsSm9WM0RUVmhXbUwKcUFUOFcxaWRaS0x0Wm5mNjEva3JPeDd0U2lJeU4xa1ErN3NYRUhnTjVMc01EOVlKcndpUFdFY2FXdU9HVmI1aApMWDZWOTRjN0U5UlFDOENtd09iSkRCNG45ZE8zcDVlTDJHaFRpMkNrRWt3ZkRPR0tEL1IxeUNaK0tFcDRWWlplCnpwcnUzRG5zOUNqZHVOT1VBWTZzUGxjazNvdEdIVnhnRC9IRlRjUEhNbGhvUVQ4dmNDOTZwc0FtYXZPR1BZQ0YKa3RtN0VWYkZDOHN5Q1BMT3AwWWhTWHRkbGtKaC9UWHBaM0hSUWJxSzVPNXR4K1dGL05qMGJVc202ZldSMzZWQgpKQVVscUJIeFhSTzhGTFNrVHkwQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFKeWZ2T3hHVVYvT2wybGRQNnYxeWFSTkd5RVkKWkVxTmM2Y29LSklsd0VQNUxNYzdZNGFReWorZCtVTE4zYmIrOXZsZXdHamluTHRrUW5HZ1R3Q3pKTU5ZNlNJNQo2NzJGZEtQTE85Szdpalhway9qRE9FVHJWS25aMXJBTytOUVBmSVhpcXQ3Y1RyVHlaVzdKTVl3emZNa2VlTGErCnREdmY1Rm5vQTBLN2U3a0ZXNTBpN2pXcGh4RXRMNEJpNzAwNnU4NEpqTU5weVp1MzhKMjFXZkR1RjBoU0NQREgKS0x4cnZIZ0FOYzJWU1c2L3JPaVVCQjdiV0JkcWcyQUNVRWZwN0V3UGs2S1BsdGNiNTJtdFhCU2xiQ3pRWWw4UQpmNmVGRFIrbnRjeXNGbU1FMFI3M1lNSHJwR0dGdlduSDVaTmEyVEJYdHpwN2tNNkVPREE5a2R4WkI1dz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
// server: https://192.168.11.170:16443
// name: kubernetes
//contexts:
//- context:
// cluster: kubernetes
// user: kubernetes-admin
// name: kubernetes-admin@kubernetes
//current-context: kubernetes-admin@kubernetes
//kind: Config
//preferences: {}
//users:
//- name: kubernetes-admin
// user:
// client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4ekNDQWR1Z0F3SUJBZ0lKQU9SWThQZlhadWQyTUEwR0NTcUdTSWIzRFFFQkN3VUFNQlV4RXpBUkJnTlYKQkFNVENtdDFZbVZ5Ym1WMFpYTXdIaGNOTWpJd01URTRNRFl6TmpRMFdoY05Nekl3TVRFMk1EWXpOalEwV2pBMApNUmN3RlFZRFZRUUtEQTV6ZVhOMFpXMDZiV0Z6ZEdWeWN6RVpNQmNHQTFVRUF3d1FhM1ZpWlhKdVpYUmxjeTFoClpHMXBiakNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFPNTZ0ZG51M24rWUsxM3oKZmNlTzNiSmhBL2J0SGpoQXpvRnNObmZjeEY3dlRTZGczSUxySmVNVkFGbG50MHpUL2xacFBlU0ZUN25iL1g1Ygo4RjErSHA2dVR0b0hRVGJHR2VzbEprdkpFMjB3OGJ0Z3VrdlNmTnROOS9NNlFTWWkvTGlHeTZpd2kveGdBVUtKClFtVW1vZmhZSHNKMllFbXJCcExOVFhtenl2a2lUTlJZVC9iNlJJRzNiT3lIVm1Lc1cwQkNQNVZTTFJsLzErZlMKM0dCUUZ2UTNXdTdmVWlzMW9DSXhsc1k5V2VJUmpGOWJDbWtKNnZsT3BWbGlsTlA0cEtSSnl4aXNBNzExNENNWAprRGJvRFBXb2lxMktubzYveXI2L0xwMktsVVVSa1JhQklodEl5eXV2TldPbjhiTW90SUpCNWNOems4UkxYTm5TCklPZEtMVDhDQXdFQUFhTW5NQ1V3RGdZRFZSMFBBUUgvQkFRREFnV2dNQk1HQTFVZEpRUU1NQW9HQ0NzR0FRVUYKQndNQ01BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQ1lwVk9NemlGRUFta1A4S3B2ZWttR3laVGV3dzQreVhyUwo3TEpoWTdTR2pGY210ZldMSW9PWEhmWmZlZWNsN3M5Snh1SytPZlhqU0d0UU9jWXk0WHo5OVFWY2FRandJMEg5Cnc3aWJiYUw3M093RGZrRDMrdlNhME9ZRWZKSFlsNXErQXBnQVpLVWRWazMvZHpJSmhRR0V6L0UxcjdYTlNabDUKL1hOT3pwbzl0VHV2dDAxRlllV0RMN01DeWZGRHFTelpQdnNyWW81bDFiTE5yeEZHb1dvSTdUMlJzR205VXJyYwoyTy84R2hMYTkwZ2tLeE9JTEpYdlJCY2RrOUN4N01ROGFGVHBuSmtPMXJzVzUxMTFoTG5hNm9WRHhISlVrbjRkCmNhODFDV3R1Yk44dkpSYlFwVmkySTJ5K3ljZ3lrNTMzR21GQXNVS3dkdm5rVjNqTVJVbFYKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
// client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcGdJQkFBS0NBUUVBN25xMTJlN2VmNWdyWGZOOXg0N2RzbUVEOXUwZU9FRE9nV3cyZDl6RVh1OU5KMkRjCmd1c2w0eFVBV1dlM1ROUCtWbWs5NUlWUHVkdjlmbHZ3WFg0ZW5xNU8yZ2RCTnNZWjZ5VW1TOGtUYlREeHUyQzYKUzlKODIwMzM4enBCSmlMOHVJYkxxTENML0dBQlFvbENaU2FoK0ZnZXduWmdTYXNHa3MxTmViUEsrU0pNMUZoUAo5dnBFZ2JkczdJZFdZcXhiUUVJL2xWSXRHWC9YNTlMY1lGQVc5RGRhN3Q5U0t6V2dJakdXeGoxWjRoR01YMXNLCmFRbnErVTZsV1dLVTAvaWtwRW5MR0t3RHZYWGdJeGVRTnVnTTlhaUtyWXFlanIvS3ZyOHVuWXFWUlJHUkZvRWkKRzBqTEs2ODFZNmZ4c3lpMGdrSGx3M09UeEV0YzJkSWc1MG90UHdJREFRQUJBb0lCQVFDdTE5YldGbFNZdGNjdAoxYVJsRi9DZ3BKSlVpcHA2WWNGRmtFSUs5UmdnQmxESnl6RkE1d2hiQ2YyOGp0Y01BKzFZQzBidWNYTDNjRHZWClZiRFB5dlRHSUVQOWhBNGpDM0RiUHR4cCtkMDlWQUlYQUI3MkVqZXFUZXE1TC8rdDV6N2tSeWV2NE9oeE95NFIKU3pNYm1BeHVXS1VNcTkrQ2cxcUpiTzRkaVYwSjg5cUtidExsclFCeDFxcHNnUjNES1VhVGVNKzVpeFYyQ1Y1bApSNDV4aU43NWRrSkpaZlY2UUV5K3V2UVd0VHk4NUN3R1U2T2hjOXA4d2s0MmFrQS9qM05FTUZiTjdDaDFKbi9RCjRhNUJpMituRUE4dGVvV2FRSzdoeU5CRENWbTFsamFjaFFveGRSNGhCWVUxdkhTbkt4a0c4bDA1K1BpRTZmZFkKaUtyemhGR0JBb0dCQVBwOStKTExzZXJ6dFQ4a2VLU2FSMXBMOHB5MTQ3cmdjdEVhckxJL2ZqY1VMU3c3OUk3UAovWWhIWnhmdm9TZEZ2QTZwNy81eHFCRitaNTM5L1NKNDlLeWFOdGNJbW01UTZKSW9aRGgzWmVVS3lMKzA1YTdRCkNqMU1wZ2hKMlZDT2VPamNxd0NVQkFhcjNWSjd0cXRxRVFCQk9jMnlWU3dzbU5wclMyYmU1S3RCQW9HQkFQTzUKSG9ZVTBMK2tzdzJKUVM5ODF1ZWtrbDMzR1ZWQ2dPUFdGWThhR3VGRGt3Sm84WGk2TmhtKzh2TjlNaGg3WkYzeQpTU3E1U2RJd01pR0IvKzVJaWp1V25DbWszY2RPdGU0VFBsZHFvdjc3Q1FUUmxPNWJCekR0L1VqYVBBam5GS0FpClg4K0V6NUVXOXFSclN2ZXplZHFDRVRBVDhRWThqNk1WY0VCRW96aC9Bb0dCQUphcVRHZ25RdVdhSHF0VENZbWcKRGtqZW81Zmt3NHcwMG5xNWU2UmZFbENZdnk3N0JQY2RYVmFwOC9WdXVkVEFXZ1BMN1VGekpXOFlROFRBNzQvYgpodmVHYm5QYWhlRFNvNEM5OE1JUjl1VFVIcmxJV2xwU1ljWkxJeGFiTEs0S2MrbEVTVXE0dk04eWNwWFpPWjlTCjFkVDhab00xdjRzcGErcjhYRWNNekNmQkFvR0JBSXVuaXI4SDFHbk1CVEYvY1pPMWRDczkyUVR3MzFwRWhqaUgKWnNrZUMwTURCb3o5OTBmWFk4S3k4T0htM2pxN0VkTG5UMWVrM3BFTFB0NkdjRkZvelpUQmczQTFZVU9nYlkwagpCN2p0aU1LVXRDRkh1cEF1SnR1NXMwWDRqeWdHeVlITTBKdkhuV3lrL09WUCthQWYvblhmeTl1QndiMXlIRmcxCm82R2Y4dXNmQW9HQkFKeGlQcGdDODJPckoxazE3V3dyOFI2ZXpxR2VYb0JPRzFlOEN6ZG1UbWFrN3prWDJJelEKSTVjT3dGaTlnREhTbUVMa0dYZnRHZ01EcXF1VHVLdS9OdW9DQS94Z2FrdTQvVHplNktqbzRjc0NDTmFza3VrRQozYnhwSnU5cElYRU5tMXVuNXBZRy90QTF0V1Rtc3dnRjY1anc2RFpTQUFUTFZMSXg3RVRDR0RlOQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=`
//
//func main() {
// k8sOperator := cmii_operator.CmiiK8sOperator{}
// k8sOperator.BuildCurrentClientFromConfig(realConfig)
// realNamespace := "ingress-nginx"
//
// // get all pods
// allInterface := k8sOperator.PodAllInterface(realNamespace)
//
// for _, deploymentInterface := range allInterface {
// utils.BeautifulPrint(deploymentInterface)
// }
//
// // restart all backend
// cmii_operator.RestartCmiiBackendDeployment(realNamespace)
// cmii_operator.RestartCmiiFrontendDeployment(realNamespace)
//
//}
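
The commented-out program above builds a Kubernetes client from an embedded kubeconfig, lists the pods in ingress-nginx, and then restarts the CMII backend and frontend deployments. A minimal sketch of what such a restart can amount to with plain client-go is shown below; rolloutRestart and the deployment name are illustrative assumptions, not helpers from this repository:

package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// rolloutRestart patches the pod template annotation so the deployment
// controller replaces every pod, the same effect as `kubectl rollout restart`.
func rolloutRestart(cs *kubernetes.Clientset, ns, name string) error {
	patch := fmt.Sprintf(
		`{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":%q}}}}}`,
		time.Now().Format(time.RFC3339))
	_, err := cs.AppsV1().Deployments(ns).Patch(
		context.TODO(), name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
	return err
}

func main() {
	// The commented program above embeds its kubeconfig as a string;
	// here a file-path placeholder is used instead.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// "ingress-nginx" is the namespace used above; the deployment name is illustrative.
	if err := rolloutRestart(cs, "ingress-nginx", "cmii-uav-gateway"); err != nil {
		panic(err)
	}
}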

View File

@@ -1,2 +0,0 @@
projectId 1751084188582440961

View File

@@ -1,9 +0,0 @@
# with internet access
bash <(curl -sL http://42.192.52.227:9000/octopus/init-script-wdd.sh) --url http://42.192.52.227:9000/octopus --agent-install --offline
# no internet access (pull from the local MinIO instead)
export offline_minio=103.0.180.82
bash <(curl -sL http://${offline_minio}:9000/octopus/init-script-wdd.sh) --url http://${offline_minio}:9000/octopus --agent-install --offline

View File

@@ -1,78 +0,0 @@
package szga
var AllZipFileName = []string{
"harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:5.2.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-developer:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-app-release:4.2.0-validation",
"harbor.cdcyy.com.cn/cmii/cmii-uav-tower:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-data:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-brain:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:5.2.0-24810",
"harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:5.6.0-051602",
"harbor.cdcyy.com.cn/cmii/cmii-uav-user:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:5.3.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:5.6.0-051501",
"harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:5.6.0-0513",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mission:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-process:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-open-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-integration:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:4.2.0-beta",
"harbor.cdcyy.com.cn/cmii/cmii-uav-lifecycle:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-user:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cms:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:5.6.0-051401",
"harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-device:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-logger:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-notice:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-iam-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:4.1.0-21377-0508",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:5.6.0-051503",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.6.0-mr830-051502",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:5.6.0-051401",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-hljtt:5.3.0-hjltt",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:5.6.0-051401",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qingdao:4.1.6-24238-qingdao",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:5.2.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:5.2.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392",
"harbor.cdcyy.com.cn/cmii/ossrs/srs:v5.0.195",
"harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
"harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA",
}

View File

@@ -1,114 +0,0 @@
package xmyd
var AllCmiiImageTagList = []string{
"harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-logger:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-notice:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cms:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-process:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-brain:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mission:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-user:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-user:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-device:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-open-gateway:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-tower:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-developer:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-app-release:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-integration:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-data:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qingdao:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-hljtt:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
"ossrs/srs:v5.0.195",
"harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA",
}
var Real540ImageTagMap = map[string]string{
"cmii-admin-data": "5.4.0",
"cmii-admin-gateway": "5.4.0",
"cmii-admin-user": "5.4.0",
"cmii-open-gateway": "5.4.0",
"cmii-suav-supervision": "5.4.0",
"cmii-uav-airspace": "5.4.0",
"cmii-uav-alarm": "5.4.0",
"cmii-uav-brain": "5.4.0",
"cmii-uav-cloud-live": "5.4.0",
"cmii-uav-cms": "5.4.0",
"cmii-uav-developer": "5.4.0",
"cmii-uav-device": "5.4.0",
"cmii-uav-emergency": "5.4.0",
"cmii-uav-gateway": "5.4.0",
"cmii-uav-gis-server": "5.2.0",
"cmii-uav-industrial-portfolio": "5.4.0-xmyd-0326",
"cmii-uav-integration": "5.4.0",
"cmii-uav-logger": "5.4.0",
"cmii-uav-material-warehouse": "5.4.0",
"cmii-uav-mission": "5.4.0-xmyd-032501",
"cmii-uav-mqtthandler": "5.4.0-xmyd-032501",
"cmii-uav-notice": "5.4.0",
"cmii-uav-oauth": "5.4.0",
"cmii-uav-process": "5.4.0",
"cmii-uav-surveillance": "5.4.0",
"cmii-suav-platform-supervision": "5.4.0",
"cmii-suav-platform-supervisionh5": "5.4.0",
"cmii-uav-platform": "5.4.0-xmyd-032501",
"cmii-uav-platform-ai-brain": "5.4.0",
"cmii-uav-platform-armypeople": "5.4.0-xmyd-032502",
"cmii-uav-platform-base": "5.4.0",
"cmii-uav-platform-cms-portal": "5.4.0",
"cmii-uav-platform-detection": "5.4.0",
"cmii-uav-platform-emergency-rescue": "5.4.0",
"cmii-uav-platform-logistics": "5.4.0",
"cmii-uav-platform-media": "5.4.0",
"cmii-uav-platform-mws": "5.4.0",
"cmii-uav-platform-oms": "5.4.0",
"cmii-uav-platform-open": "5.4.0",
"cmii-uav-platform-security": "5.4.0",
"cmii-uav-platform-seniclive": "5.4.0",
"cmii-uav-platform-share": "5.4.0",
"cmii-uav-platform-splice": "5.4.0",
"cmii-uav-platform-visualization": "5.4.0",
}
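
The xmyd package above keeps both a full image reference list and a plain name-to-tag map; each map key is the last path segment of a reference and the value is the part after the colon. A minimal, self-contained sketch of that conversion (imageMapFromRefs is a hypothetical name, not a function from this repository):

package main

import (
	"fmt"
	"strings"
)

// imageMapFromRefs turns full references such as
// "harbor.cdcyy.com.cn/cmii/cmii-uav-user:5.4.0" into {"cmii-uav-user": "5.4.0"}.
func imageMapFromRefs(refs []string) map[string]string {
	out := make(map[string]string, len(refs))
	for _, ref := range refs {
		name, tag := ref, "latest"
		if i := strings.LastIndex(ref, ":"); i >= 0 {
			name, tag = ref[:i], ref[i+1:]
		}
		if j := strings.LastIndex(name, "/"); j >= 0 {
			name = name[j+1:]
		}
		out[name] = tag
	}
	return out
}

func main() {
	m := imageMapFromRefs([]string{"harbor.cdcyy.com.cn/cmii/cmii-uav-user:5.4.0"})
	fmt.Println(m) // map[cmii-uav-user:5.4.0]
}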

View File

@@ -1 +0,0 @@
package xzyd

View File

@@ -1,64 +0,0 @@
package zjjt
var CmiiImageMap = map[string]string{
"cmii-admin-data": "5.5.0",
"cmii-admin-gateway": "5.5.0",
"cmii-admin-user": "5.5.0",
"cmii-app-release": "4.2.0-validation",
"cmii-live-operator": "5.2.0",
"cmii-open-gateway": "5.5.0",
"cmii-srs-oss-adaptor": "2023-SA",
"cmii-suav-platform-supervision": "5.5.0",
"cmii-suav-platform-supervisionh5": "5.5.0",
"cmii-suav-supervision": "5.4.0-032501",
"cmii-uav-airspace": "5.5.0",
"cmii-uav-alarm": "5.5.0",
"cmii-uav-autowaypoint": "4.2.0-beta",
"cmii-uav-brain": "5.5.0",
"cmii-uav-cloud-live": "5.5.0",
"cmii-uav-cms": "5.5.0",
"cmii-uav-data-post-process": "5.5.0",
"cmii-uav-developer": "5.5.0",
"cmii-uav-device": "5.5.0",
"cmii-uav-emergency": "5.3.0",
"cmii-uav-gateway": "5.5.0",
"cmii-uav-gis-server": "5.5.0",
"cmii-uav-grid-datasource": "5.2.0-24810",
"cmii-uav-grid-engine": "5.1.0",
"cmii-uav-grid-manage": "5.1.0",
"cmii-uav-industrial-portfolio": "5.5.0-041801",
"cmii-uav-integration": "5.5.0-0419",
"cmii-uav-kpi-monitor": "5.5.0",
"cmii-uav-logger": "5.5.0",
"cmii-uav-material-warehouse": "5.5.0",
"cmii-uav-mission": "5.5.0",
"cmii-uav-mqtthandler": "5.5.0",
"cmii-uav-multilink": "5.5.0",
"cmii-uav-notice": "5.5.0",
"cmii-uav-oauth": "5.5.0",
"cmii-uav-platform": "5.5.0",
"cmii-uav-platform-ai-brain": "5.5.0",
"cmii-uav-platform-armypeople": "5.5.0",
"cmii-uav-platform-base": "5.4.0",
"cmii-uav-platform-cms-portal": "5.5.0",
"cmii-uav-platform-detection": "5.5.0",
"cmii-uav-platform-jiangsuwenlv": "4.1.3-jiangsu-0427",
"cmii-uav-platform-logistics": "5.5.0",
"cmii-uav-platform-media": "5.5.0",
"cmii-uav-platform-multiterminal": "5.5.0",
"cmii-uav-platform-mws": "5.5.0",
"cmii-uav-platform-oms": "5.5.0",
"cmii-uav-platform-open": "5.5.0-0419",
"cmii-uav-platform-qinghaitourism": "4.1.0-21377-0508",
"cmii-uav-platform-security": "5.5.0",
"cmii-uav-platform-securityh5": "5.5.0",
"cmii-uav-platform-share": "5.5.0",
"cmii-uav-platform-splice": "5.5.0",
"cmii-uav-platform-threedsimulation": "5.2.0-21392",
"cmii-uav-process": "5.5.0",
"cmii-uav-surveillance": "5.5.0",
"cmii-uav-threedsimulation": "5.5.0",
"cmii-uav-tower": "5.5.0",
"cmii-uav-user": "5.5.0",
"cmii-uav-waypoint": "5.5.0",
}

View File

@@ -1,65 +0,0 @@
package zjjt
var RealImagePullList = []string{
"harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-data:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-user:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:5.4.0-032501",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cms:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:5.3.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mission:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-process:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-device:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-brain:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-open-gateway:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:5.5.0-041801",
"harbor.cdcyy.com.cn/cmii/cmii-app-release:4.2.0-validation",
"harbor.cdcyy.com.cn/cmii/cmii-uav-tower:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:4.2.0-beta",
"harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-logger:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-notice:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:5.2.0-24810",
"harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-integration:5.5.0-0419",
"harbor.cdcyy.com.cn/cmii/cmii-uav-user:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-developer:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:5.5.0-0419",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:4.1.0-21377-0508",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:5.5.0",
"harbor.cdcyy.com.cn/cmii/ossrs/srs:v5.0.195",
"harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
"harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA",
}

View File

@@ -1,125 +0,0 @@
package main
//
//import (
// "wdd.io/agent-common/utils"
// cmiioperator "wdd.io/agent-operator"
//)
//
//var realConfig = `apiVersion: v1
//kind: Config
//clusters:
//- cluster:
// api-version: v1
// certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN3akNDQWFxZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFTTVJBd0RnWURWUVFERXdkcmRXSmwKTFdOaE1CNFhEVEkwTURNd056QTVNamd3TkZvWERUTTBNRE13TlRBNU1qZ3dORm93RWpFUU1BNEdBMVVFQXhNSAphM1ZpWlMxallUQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUwvNENFYy96cnNPCnpzTG9OVlhTQWFLTkpldGs2RlBCbFYvditLcFNOelFOY1FsZ0hSN2NSSWl0c0N2eHBvYUtucFY4VEFLZFJpb3gKTGRPakM4a1E1OUt3cXk5SXU1Wk5LYWpOaDVIZDNCdzlMOHJiUVJoTThwRWp3dzRJTFdhdzNNMlF2NnA2YjdqRgpQN0h1c3VWZW1JVEl4TTl1T3BtQzNVOWZaQzVIbVpKZDdpaEJzaVpMR2lZOGVES2lPbGh6am10amNQWUFiUnE4Cml6UW1zcmdhUityb203YTdBQTdxU3ZTdHlyTmRjbXFBQmRvU3lEUDhaOFBzWlB2djhWSisyOUJ1eEgveVhCLzIKaVBsaG83Yjl4eGduSmJxaURRS0NsbzVjcFBzbWpQQ0JkZmJPVk9ORzhRZzY1UmJPems2TnNXUzNvLzFWVklaSwpqeVMyZjFlcjFBMENBd0VBQWFNak1DRXdEZ1lEVlIwUEFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CCkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQjRNd3ZyWGZOaHBWTVpFNWMyY2gxS05CYStJNGc1Z2tnU1YKajNGN3BRV1FpbXZSb1VDSmhjOEJiZmRIVnN1VzE5NE9FREx6WUtONGxDb20vWUVZNUN3R0lwVjhVRlN0aDJuZQpxcTRhM1crVW4xM2Z1TWdBemxTblFSQ3F0d01UbEh3cnlnaUQ0bE5HMGVEdVdoVlMwdVpZSHFpV0Y2OENUZmp5Cng3UVhsVmNrTU1XZzhoSlIwNG1QV1BhYis5cDd0b3Q1WWZwK0kxOWU5V2dpelJNNCs3TGoxUmpCVGN4WGFaYWgKL3JrMjZzV3JmK0xkcEh6c0U1cFc3LzlKM09MNGdTWFJKb09kclQwK1lsczVIRm83Q1d5TW1tbmVxMlR4Q2tHSwpxTkVzNUUrdDYrYStCL3B0cXZHd3RmbnlKeFV1YkZhY3FJeG1vbGo3UW52OWR1RVRiQkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
// server: "https://127.0.0.1:6443"
// name: "rke-cluster"
//contexts:
//- context:
// cluster: "rke-cluster"
// user: "kube-admin-rke-cluster"
// name: "rke-cluster"
//current-context: "rke-cluster"
//users:
//- name: "kube-admin-rke-cluster"
// user:
// client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2VENDQWRHZ0F3SUJBZ0lJZEtaNDNXVVpLOE13RFFZSktvWklodmNOQVFFTEJRQXdFakVRTUE0R0ExVUUKQXhNSGEzVmlaUzFqWVRBZUZ3MHlOREF6TURjd09USTRNRFJhRncwek5EQXpNRFV3T1RVd05EWmFNQzR4RnpBVgpCZ05WQkFvVERuTjVjM1JsYlRwdFlYTjBaWEp6TVJNd0VRWURWUVFERXdwcmRXSmxMV0ZrYldsdU1JSUJJakFOCkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTA0M1hyS215Rkgvemw5SU9ubjJkODN5Mlo2Rm4KMXhiYVZMN09nTXlZelVCS204WFdWY0V5L1RaRTBnV1pJdm9nTmtVOGptd0F6d0dxb2dmeS9nVVk2VWRINkVDQgowcVRMUDFkQTlJSU1XL3c5SlpjUU0wTWw3Qi9NUVNYbWRkRmZhWHk1TjlYYWpoSVB3ZFFKRFNOZ2cwblRKZnYvCmZSaU1PUWhMYTVBUUNHQjFEZ2pjdC8xd1dZSEF4Qks1Rlk0QTh0UTA4SzlxV1ovYnpQWXUzMGlsWjkvTllrcHAKRHVpVUhYZEdEZHAvbUtianl5LzcwVktXUmxDSmlCUWpXajdTZEd5dEZtNTN6YW9CdGh5OFhibFNaVHR4QUx6bgp5UWYweENrZGxZeWFaMFJDOXhvaFF0NzZQNFkzZmhaYlpMaStjV2MwRG1SdlJEN0FhSGREb1EwQ0tRSURBUUFCCm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUhBd0l3RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dFQkFEVlEwbTEvTk0xdHBzTlVhRDhMeGJNRUw3NEVvR2RVWVpoZWtGbWRBQXBISENEUgpiMjNuYzBtN2FIbmdGZEZEQk9lNFBEaDFnKzY0V2JrNjdiUHpXUjJ6NE1kdlhnQW5vdUhjZ2xJMUYxdUpVWVJ2CmZJdmVlem82UkFqYjUrcXB5c1IxbmkwMEtGQjZJQU5oMW9zRElKNUNkTXJma2xxWDQvK0hTbDZ6alJPU2xlYmIKTy9mWFduemt3cGRtNFFPQ2xjRTBHTDlZNHl4Q25nd3VWc3lTMWI0OHpobk5GTDhVUGxpNC9YQVM5cVBVSzdZYwpYYWpHeWs1cFkrRFVhMFN2NDdweVhFUVZNREVzQmQwUGJ6eGk0anp0cHcvQjlQbm5OQVVpN05UMVh1aEFyOUMxCmI0Mjl4UHQySjE2ejZycXp5b3VXUFQ3RHM1WEVTQnM4dDZISFBRcz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
// client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMDQzWHJLbXlGSC96bDlJT25uMmQ4M3kyWjZGbjF4YmFWTDdPZ015WXpVQkttOFhXClZjRXkvVFpFMGdXWkl2b2dOa1U4am13QXp3R3FvZ2Z5L2dVWTZVZEg2RUNCMHFUTFAxZEE5SUlNVy93OUpaY1EKTTBNbDdCL01RU1htZGRGZmFYeTVOOVhhamhJUHdkUUpEU05nZzBuVEpmdi9mUmlNT1FoTGE1QVFDR0IxRGdqYwp0LzF3V1lIQXhCSzVGWTRBOHRRMDhLOXFXWi9ielBZdTMwaWxaOS9OWWtwcER1aVVIWGRHRGRwL21LYmp5eS83CjBWS1dSbENKaUJRaldqN1NkR3l0Rm01M3phb0J0aHk4WGJsU1pUdHhBTHpueVFmMHhDa2RsWXlhWjBSQzl4b2gKUXQ3NlA0WTNmaFpiWkxpK2NXYzBEbVJ2UkQ3QWFIZERvUTBDS1FJREFRQUJBb0lCQURLeUpnSDVkSFJYS1MregpwYzh6T1J1MVFoelpZQUg2TnYzaDc2aUwzdjRvcnZoZlUzcWZYckd4UkpLenhydk1CdFlhaDJWMTJrZkJGWHZZCnZkRkR0WEdKcEdDeXZLbVcxaUhxcmVVcUdQNGVGeVVmNjBEdGtYUGhOdGhSMWNWY0ZDbzZPa1I0R2ZTN3ZVenMKbS9LckRLREptekRhRDZLUnRHQ2liVGhzZ0hzUStsOXhQQ3RYYldSVTlIV2cwUTY4Y1l1TVMzRUhyNlJtbHVVdAoxeC8veklLUnNLSksxL2ZpS0o2bHMxUVhYb01EMHVQK1dPYlNsQnNiZkNpM0Z2SmNBdDNOVnc1eEtJMFJxN0R2Ck1LdEp0WUFreFNhK2NWZ3BNSEh5WFZmaVh0VnhVT0dzRzk2OUlRWWlNdzh3TkFwa2ZRbHNOSm9MWkdpemJBWkEKNEhrZjA0RUNnWUVBMUx3SHFGYXNDcElxOFFxVkdwOEkvMjNIcXF5bm1rQThTVS9BK2RVV3hxVjJ4L2dWQy9oZApCc3FiZHNLS3Y4RU5Sc3BRNm5sc0FpU05xS0hHeUZvbzF6UVhFVHE4WVhIaG5GVGZoMm5uVFZwRmJCNVdhTTRXCmRaa04vUzZsSGhDaDIxTnJUcEl0dnhjM0JDemc3NloxVHFaV01yc3JCZE9tbDZMUnNJUzZRTkVDZ1lFQS9wUmEKczI3MzFKZjYra0p0VXRYemVGbk9aeEZOR0hTemFFNmI0UnhuWERmN2w2R0tmMElwemtqVUhjUzA0bWpNditmbApJaDBsVGVYaE5hQm13ZGwwU3U1djUyWUFreFlvMmFoMVJWZk5QMEVqdkw4QWtUb2RsSEE1TGhjaVVhWjlBWkRLCmJXS0QwbGMzL0Q5bmVlSGpSZFpMSmhoVW5DNlFTbU9ad3Q4SFFka0NnWUVBZ0FRKzMzQjR5MHUyaDZNRW95WjgKOWFrTWRJcTl1VGRha0F0c1oydHg3MHgzTkZMMzUySW9LUVFYbGRud1FRQmRpdklJeTNFU0xCL3ZGMEZ6Sy9JRgpqYXVORGhNNGRiTmdQd0ZjR2xNQ25DdnNodW1pdWlMNnBQM2J5elljcXdEN1JjN25UanJ0U0ljaDFtTmpZUlBjCmw5M0ZGWFpJcDVMOE4xZ0ZzNkhMcTJFQ2dZRUEwVFJZMU50OERkaFhCeEZQaGFNTVVQcDhBM2JVUkNTaXlqVFAKSkU2VElkVmZpMXZVMUg4cW03cDlsWGp3cko0eXBBR002bHZKTEJxYzE5VFluTFIyUEoxMG1GUGFaUVR3ek8wQwpjZG1WY1VXMmVJVDlrbHFQdEV3RXNUdVJtRWVZc3BDcHlQb01HZTVTczVmbkVPSHdRcE8zYmJiUTBRZnl5eTdPClRMVzY0UUVDZ1lBNWZxbUhkYjU1Y0ZReDNyWlVqTkMyN3o5MTVBMzRjdkVLTlIvSjAxekFicUlHWFJ3dWRsQlcKYWQ5S1ZrSzhIenZHRVRlUTU1NmNXTU9yRGhyejZrSS9GWE9TL3poNnJmQ1JKV0xCL3ptSXlsdU8yZmR4VmQ2UAo5eStJY0tIN3dCcXFubkxlN3Nxb2FHU2Q5UTEzdTc4QWhnbGN1N3BocUlaWmVscHdMemRjYlE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=`
//
//func main() {
// k8sOperator := cmiioperator.CmiiK8sOperator{}
// k8sOperator.BuildCurrentClientFromConfig(realConfig)
// realNamespace := "zyga"
//
// // get all pods
// //allInterface := k8sOperator.PodAllInterface(realNamespace)
// //
// //for _, deploymentInterface := range allInterface {
// // utils.BeautifulPrint(deploymentInterface)
// //}
//
// // restart all backend
// //cmii_operator.RestartCmiiBackendDeployment(realNamespace)
//
// //cmii_operator.RestartCmiiFrontendDeployment(realNamespace)
//
// backMap := map[string]string{
// //"cmii-admin-data": "5.4.0",
// //"cmii-admin-gateway": "5.4.0",
// //"cmii-admin-user": "5.4.0",
// //"cmii-app-release": "4.2.0-validation",
// //"cmii-open-gateway": "5.4.0",
// //"cmii-suav-supervision": "5.2.0",
// //"cmii-uav-airspace": "5.4.0",
// //"cmii-uav-alarm": "5.4.0",
// //"cmii-uav-autowaypoint": "4.1.6-cm",
// //"cmii-uav-brain": "5.4.0",
// //"cmii-uav-cloud-live": "5.4.0",
// //"cmii-uav-clusters": "5.2.0",
// //"cmii-uav-cms": "5.3.0",
// //"cmii-uav-data-post-process": "5.4.0",
// //"cmii-uav-depotautoreturn": "5.4.0",
// //"cmii-uav-developer": "5.4.0",
// //"cmii-uav-device": "5.4.0-25916",
// //"cmii-uav-emergency": "5.3.0",
// //"cmii-uav-gateway": "5.4.0",
// //"cmii-uav-gis-server": "5.4.0",
// //"cmii-uav-grid-datasource": "5.2.0-24810",
// //"cmii-uav-grid-engine": "5.1.0",
// //"cmii-uav-grid-manage": "5.1.0",
// //"cmii-uav-industrial-portfolio": "5.4.0-27348-1",
// //"cmii-uav-integration": "5.4.0-25916",
// //"cmii-uav-kpi-monitor": "5.4.0",
// //"cmii-uav-logger": "5.4.0",
// //"cmii-uav-material-warehouse": "5.4.0",
// //"cmii-uav-mission": "5.4.0-26462-0307",
// //"cmii-uav-mqtthandler": "5.4.0-25916",
// //"cmii-uav-multilink": "5.4.0",
// //"cmii-uav-notice": "5.4.0",
// //"cmii-uav-oauth": "5.4.0",
// //"cmii-uav-process": "5.4.0",
// "cmii-uav-surveillance": "5.4.0-leaflet",
// //"cmii-uav-threedsimulation": "5.1.0",
// //"cmii-uav-tower": "5.4.0",
// //"cmii-uav-user": "5.4.0",
// //"cmii-uav-waypoint": "5.4.0-26768",
// }
// //
// frontMap := map[string]string{
// "cmii-uav-platform": "5.4.0-leaflet",
// //"cmii-suav-platform-supervision": "5.4.0",
// //"cmii-suav-platform-supervisionh5": "5.4.0",
// //"cmii-uav-platform-ai-brain": "5.4.0",
// //"cmii-uav-platform-armypeople": "5.4.0",
// //"cmii-uav-platform-base": "5.4.0",
// //"cmii-uav-platform-cms-portal": "5.4.0",
// //"cmii-uav-platform-detection": "5.4.0",
// //"cmii-uav-platform-emergency-rescue": "5.2.0",
// //"cmii-uav-platform-hljtt": "5.3.0-hjltt",
// //"cmii-uav-platform-jiangsuwenlv": "4.1.3-jiangsu-0427",
// //"cmii-uav-platform-logistics": "5.4.0",
// //"cmii-uav-platform-media": "5.4.0",
// //"cmii-uav-platform-multiterminal": "5.4.0",
// //"cmii-uav-platform-mws": "5.4.0",
// //"cmii-uav-platform-oms": "5.4.0",
// //"cmii-uav-platform-open": "5.4.0",
// //"cmii-uav-platform-qingdao": "4.1.6-24238-qingdao",
// //"cmii-uav-platform-qinghaitourism": "4.1.0-21377-0508",
// //"cmii-uav-platform-security": "4.1.6",
// //"cmii-uav-platform-securityh5": "5.4.0",
// //"cmii-uav-platform-seniclive": "5.2.0",
// //"cmii-uav-platform-share": "5.4.0",
// //"cmii-uav-platform-splice": "5.4.0",
// //"cmii-uav-platform-threedsimulation": "5.2.0-21392",
// //"cmii-uav-platform-visualization": "5.2.0",
// }
// //
// cmiioperator.CmiiOperator = k8sOperator
// //
// result := cmiioperator.UpdateCmiiImageTagFromNameTagMap(realNamespace, backMap)
// utils.BeautifulPrint(result)
//
// result = cmiioperator.UpdateCmiiImageTagFromNameTagMap(realNamespace, frontMap)
// utils.BeautifulPrint(result)
//
//}
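
The second commented-out program above feeds backend and frontend name-to-tag maps into UpdateCmiiImageTagFromNameTagMap to retag workloads already deployed in the zyga namespace. A rough sketch of the underlying idea with plain client-go follows; setDeploymentImage, the kubeconfig path, and the registry prefix are assumptions for illustration, not code from this repository:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// setDeploymentImage points the first container of a deployment at a new image tag.
func setDeploymentImage(cs *kubernetes.Clientset, ns, name, registry, tag string) error {
	dep, err := cs.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if len(dep.Spec.Template.Spec.Containers) == 0 {
		return fmt.Errorf("deployment %s has no containers", name)
	}
	dep.Spec.Template.Spec.Containers[0].Image = fmt.Sprintf("%s/%s:%s", registry, name, tag)
	_, err = cs.AppsV1().Deployments(ns).Update(context.TODO(), dep, metav1.UpdateOptions{})
	return err
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config") // placeholder path
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Example: retag one backend listed in the map above.
	if err := setDeploymentImage(cs, "zyga", "cmii-uav-surveillance",
		"harbor.cdcyy.com.cn/cmii", "5.4.0-leaflet"); err != nil {
		panic(err)
	}
}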