Merge branch 'refs/heads/local-ss'

zeaslity
2024-04-26 17:51:26 +08:00
72 changed files with 16394 additions and 527 deletions

View File

@@ -0,0 +1,68 @@
package assert
import (
"fmt"
"reflect"
"strings"
)
var Asserter = NewAssert()
// Assert utility class
type Assert struct{}
// NewAssert returns a new instance of Assert
func NewAssert() *Assert {
return &Assert{}
}
// NotEmpty checks if the given value is not empty
func (a *Assert) NotEmpty(value interface{}, message string) {
if isEmptyValue(reflect.ValueOf(value)) {
panic(fmt.Sprintf("Assertion failed: %s", message))
}
}
// NotBlank checks if the given string is not blank
func (a *Assert) NotBlank(str string, message string) {
if str == "" || len(strings.TrimSpace(str)) == 0 {
panic(fmt.Sprintf("Assertion failed: %s", message))
}
}
// Equals checks if two values are equal
func (a *Assert) Equals(expected, actual interface{}, message string) {
if !reflect.DeepEqual(expected, actual) {
panic(fmt.Sprintf("Assertion failed: %s. Expected '%v' but got '%v'", message, expected, actual))
}
}
// Nil checks if the given value is nil
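// caveat: an interface holding a typed-nil pointer compares non-nil, so only an untyped nil is caught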
func (a *Assert) Nil(value interface{}, message string) {
if value != nil {
panic(fmt.Sprintf("Assertion failed: %s", message))
}
}
// NotNil checks if the given value is not nil
func (a *Assert) NotNil(value interface{}, message string) {
if value == nil {
panic(fmt.Sprintf("Assertion failed: %s", message))
}
}
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Invalid:
// reflect.ValueOf(nil) yields an invalid Value; treat it as empty
return true
case reflect.Array, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Map:
return v.IsNil() || v.Len() == 0
case reflect.Ptr:
if v.IsNil() {
return true
}
return isEmptyValue(v.Elem())
default:
return false
}
}
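A minimal usage sketch of the new assert helpers (a hypothetical call site, not part of this commit; the import path matches the one used by the operator tests below):
package main
import "wdd.io/agent-common/assert"
func main() {
// each helper panics with "Assertion failed: <message>" when its check fails
assert.Asserter.NotBlank("cmii-uav-gateway", "app name must not be blank")
assert.Asserter.Equals("5.4.0", "5.4.0", "tags should match")
assert.Asserter.NotNil(&struct{}{}, "value must not be nil")
}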

View File

@@ -5,7 +5,6 @@ go 1.22.1
require (
go.uber.org/zap v1.27.0
golang.org/x/net v0.24.0
gopkg.in/yaml.v3 v3.0.1
)
require go.uber.org/multierr v1.10.0 // indirect

View File

@@ -12,7 +12,5 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -133,6 +133,11 @@ func GzipFileNameToImageFullName(gzipFileName string) (imageFullName string) {
}
gzipFileName = strings.TrimSuffix(gzipFileName, ".tar.gz")
if strings.HasPrefix(gzipFileName, "docker=library") {
// docker=library=busybox=latest.tar.gz
return strings.Split(gzipFileName, "=")[2] + ":" + strings.Split(gzipFileName, "=")[3]
}
if strings.HasPrefix(gzipFileName, "docker") {
return strings.Split(gzipFileName, "=")[1] + "/" + strings.Split(gzipFileName, "=")[2] + ":" + strings.Split(gzipFileName, "=")[3]
}
@@ -143,3 +148,27 @@ func GzipFileNameToImageFullName(gzipFileName string) (imageFullName string) {
return gzipFileName
}
func GzipFileNameToImageNameAndTag(gzipFileName string) (imageName, imageTag string) {
if !strings.HasSuffix(gzipFileName, ".tar.gz") {
log.ErrorF(" %s is not end with .tar.gz", gzipFileName)
return "", ""
}
gzipFileName = strings.TrimSuffix(gzipFileName, ".tar.gz")
if strings.HasPrefix(gzipFileName, "docker=library") {
// docker=library=busybox=latest.tar.gz
return strings.Split(gzipFileName, "=")[2], strings.Split(gzipFileName, "=")[3]
}
if strings.HasPrefix(gzipFileName, "docker") {
// docker=kubernetes=kubernetes-dashboard=v2.4.0.tar.gz
return strings.Split(gzipFileName, "=")[1] + "/" + strings.Split(gzipFileName, "=")[2], strings.Split(gzipFileName, "=")[3]
}
if strings.HasPrefix(gzipFileName, "cmlc=cmii=") {
return strings.Split(gzipFileName, "=")[2], strings.Split(gzipFileName, "=")[3]
}
return "", ""
}
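Worked examples of the '=' file-name convention handled above (the third name is hypothetical, built from the cmlc=cmii= prefix):
// docker=library=busybox=latest.tar.gz -> imageName "busybox", imageTag "latest"
// docker=kubernetes=kubernetes-dashboard=v2.4.0.tar.gz -> imageName "kubernetes/kubernetes-dashboard", imageTag "v2.4.0"
// cmlc=cmii=cmii-uav-gateway=5.4.0.tar.gz -> imageName "cmii-uav-gateway", imageTag "5.4.0"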

View File

@@ -0,0 +1,79 @@
package pusher
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"wdd.io/agent-common/logger"
)
var log = logger.Log
// CmiiUpdateMessage message_pusher/cmii/CmiiMessage.go
type CmiiUpdateMessage struct {
Namespace string
AppName string
FromTag string
ToTag string
Replicas string
DeployStatus bool
}
type Message struct { // TODO combine with server.message
ID string
Event string
Time int64
Topic string
Message string
Title string
Priority int
Tags []string
Click string
Icon string
// Additional fields
TopicURL string
SubscriptionID string
Raw string
}
func (c *CmiiUpdateMessage) SendMessage() (message Message) {
// marshal the struct into a JSON string
requestBytes, err := json.Marshal(c)
if err != nil {
fmt.Println("Error encoding request body to JSON:", err)
return
}
url := "http://192.168.35.71:8080/cmii/update" // 替换为实际的API地址
req, err := http.NewRequest("POST", url, bytes.NewBuffer(requestBytes))
if err != nil {
fmt.Println("Error creating request:", err)
return
}
// set the request headers
req.Header.Set("Content-Type", "application/json")
// send the request and read the response
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
fmt.Println("Error sending request:", err)
return
}
defer resp.Body.Close()
bodyBytes, _ := io.ReadAll(resp.Body)
var m Message
err = json.Unmarshal(bodyBytes, &m)
if err != nil {
log.ErrorF("Error unmarshaling response body to JSON:", err)
return message
}
return m
}

View File

@@ -0,0 +1,16 @@
package pusher
import "testing"
func TestCmiiUpdateMessage_SendMessage(t *testing.T) {
c := &CmiiUpdateMessage{
Namespace: "dev",
AppName: "cmii-uav-gateway",
FromTag: "5.1.0",
ToTag: "5.5.0",
Replicas: "2",
DeployStatus: false,
}
c.SendMessage()
}

View File

@@ -1,11 +0,0 @@
package cmii
// CmiiUpdateMessage message_pusher/cmii/CmiiMessage.go
type CmiiUpdateMessage struct {
Namespace string
AppName string
FromTag string
ToTag string
Replicas string
DeployStatus bool
}

View File

@@ -68,6 +68,30 @@ func AppendContentToFile(content string, targetFile string) bool {
return true
}
// AppendContentWithSplitLineToFile is tailored to k8s YAML files: it writes a '---' separator line before each content write
func AppendContentWithSplitLineToFile(content string, targetFile string) bool {
// open the file for appending; create it if it does not exist
file, err := os.OpenFile(targetFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.ErrorF("[BasicAppendContentToFile] - Error opening file: %s , error is %s", targetFile, err.Error())
return false
}
defer file.Close() // make sure the file is closed in the end
// write the separator on its own line, then the content
if _, err := file.WriteString("---\n"); err != nil {
log.ErrorF("[BasicAppendContentToFile] - Error writing to file: %s , error is %s", targetFile, err.Error())
return false
}
if _, err := file.WriteString(content); err != nil {
log.ErrorF("[BasicAppendContentToFile] - Error writing to file: %s , error is %s", targetFile, err.Error())
return false
}
return true
}
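A sketch of the resulting file layout after two calls (hypothetical content; each content string is assumed to end with a newline):
// AppendContentWithSplitLineToFile("kind: Service ...\n", "all-in-one.yaml")
// AppendContentWithSplitLineToFile("kind: Deployment ...\n", "all-in-one.yaml")
// all-in-one.yaml now reads:
// ---
// kind: Service ...
// ---
// kind: Deployment ...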
// AppendNullToFile empties a file
func AppendNullToFile(targetFile string) bool {
@@ -96,6 +120,15 @@ func WordSpaceCompletion(source string, totalLength int) string {
return source
}
// IsFileOrDir returns true if the path is a directory and false if it is a file
func IsFileOrDir(path string) bool {
info, err := os.Stat(path)
if err != nil {
return false
}
return info.IsDir()
}
// FileExists returns true if the file exists and false if it does not; a directory also yields false
func FileExists(fileFullPath string) bool {
_, err := os.Stat(fileFullPath)
@@ -127,12 +160,21 @@ func FileExistAndNotNull(filename string) bool {
return size > 0
}
// ListAllFileInFolder lists all files in a folder, returning bare file names (directories skipped, no full paths)
func ListAllFileInFolder(folderName string) ([]string, error) {
return listAllFileInFolderWithFullPath(folderName, false)
}
func listAllFileInFolderWithFullPath(folderName string, fullPath bool) ([]string, error) {
files := make([]string, 0)
err := filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
if fullPath {
files = append(files, path)
} else {
files = append(files, info.Name())
}
}
return nil
})
if err != nil {

View File

@@ -0,0 +1,17 @@
package utils
import (
"math/rand"
"time"
)
// seed a dedicated generator once: rand.Seed is deprecated since Go 1.20,
// and reseeding on every call weakens rapid successive calls
var seededRand = rand.New(rand.NewSource(time.Now().UnixNano()))
func GenerateRandomString(length int) string {
chars := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
b := make([]byte, length)
for i := range b {
b[i] = chars[seededRand.Intn(len(chars))]
}
return string(b)
}
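If these strings are ever used as tokens or secrets, a crypto/rand variant would be preferable; a minimal sketch (an illustration, not part of this commit):
import (
"crypto/rand"
"math/big"
)
func GenerateSecureRandomString(length int) (string, error) {
chars := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
b := make([]byte, length)
for i := range b {
// rand.Int draws a uniform value in [0, len(chars)) from crypto/rand.Reader
n, err := rand.Int(rand.Reader, big.NewInt(int64(len(chars))))
if err != nil {
return "", err
}
b[i] = chars[n.Int64()]
}
return string(b), nil
}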

View File

@@ -3,6 +3,8 @@ package a_status
import (
"fmt"
"github.com/shirou/gopsutil/v3/disk"
"os"
"path/filepath"
"regexp"
"time"
)
@@ -115,3 +117,45 @@ func MatchNeededDisk(deviceName string) bool {
return true
}
func dirSize(path string) (int64, error) {
var size int64
err := filepath.Walk(path, func(filePath string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
size += info.Size()
}
return nil
})
return size, err
}
// DiskUsages is an unused sketch: walk from root and print the size of every
// directory at most maxDepth levels deep (depth via strings.Count; the "strings"
// import would be needed if this is ever enabled)
//func DiskUsages() {
// root := "/"
// maxDepth := 3
//
// fmt.Println("Scanning directories...")
// err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
// if err != nil {
// return err
// }
// depth := strings.Count(strings.TrimPrefix(path, root), string(os.PathSeparator))
// if info.IsDir() && depth <= maxDepth {
// size, err := dirSize(path)
// if err != nil {
// fmt.Printf("Error: %v\n", err)
// return nil
// }
// fmt.Printf("%s: %d bytes\n", path, size)
// }
// return nil
// })
// if err != nil {
// fmt.Println(err)
// }
//}

View File

@@ -1,15 +0,0 @@
#!/bin/bash
# Section to modify
# Section to modify
# Socks5
install_socks5() {
}
# MinIO installation
install_minio_server() {
}
##
# RabbitMQ installation and initialization

View File

@@ -2,6 +2,7 @@ package main
import (
"fmt"
"regexp"
"strings"
)
@@ -25,6 +26,17 @@ func splitTest() {
func main() {
splitTest()
inputList := []string{
"4.1.6-xxx",
"5.1.0",
"3.2.0-0123-123",
}
// escape the dots - with bare `.` the greedy pattern also swallows suffixes such as "-0123-123"
r, _ := regexp.Compile(`\d+\.\d+\.\d+`)
for _, input := range inputList {
matches := r.FindAllString(input, -1)
for _, match := range matches {
fmt.Println(match) // Output: 4.1.6 5.1.0 3.2.0
}
}
}

View File

@@ -8,7 +8,7 @@ import (
"wdd.io/agent-common/utils"
)
var CmiiOperator = CmiiK8sOperator{}
var DefaultCmiiOperator = CmiiK8sOperator{}
// var updateLogPath = "C:\\Users\\wddsh\\Documents\\IdeaProjects\\ProjectOctopus\\cmii_operator\\log\\cmii-update-log.txt"
var updateLogPath = "/home/wdd/IdeaProjects/ProjectOctopus/cmii_operator/log/cmii-update-log.txt"
@@ -19,7 +19,7 @@ func FindAppNotHealthyOrRestartCountGreaterThanN(cmiiEnv string, restartCount in
//podInterface := CmiiPodInterface{}
// get all pods
podAll := CmiiOperator.PodAllInterface(cmiiEnv)
podAll := DefaultCmiiOperator.PodAllInterface(cmiiEnv)
// restart map
restartMap := make(map[string]int32, len(podAll))
@@ -47,7 +47,7 @@ func FindAppNotHealthyOrRestartCountGreaterThanN(cmiiEnv string, restartCount in
// find deployment convert to interface
for key, value := range restartMap {
// container Name must equals deployment name
deployment := CmiiOperator.DeploymentOneInterface(cmiiEnv, key)
deployment := DefaultCmiiOperator.DeploymentOneInterface(cmiiEnv, key)
if deployment != nil {
// deployment exists
log.DebugF("[FindAppNotHealthyOrRestartCountGreaterThanN] - restart [%s] [%s] is [%d]", cmiiEnv, key, value)
@@ -63,7 +63,7 @@ func FindAppNotHealthyOrRestartCountGreaterThanN(cmiiEnv string, restartCount in
func FindDeploymentReplicasSmallerThanN(cmiiEnv string, replicasMin int32) (deploymentList []CmiiDeploymentInterface) {
// get all deployments
cmiiDeploymentInterfaces := CmiiOperator.DeploymentAllInterface(cmiiEnv)
cmiiDeploymentInterfaces := DefaultCmiiOperator.DeploymentAllInterface(cmiiEnv)
cmiiDeploymentInterfaces = FilterAllCmiiAppSoft(cmiiDeploymentInterfaces)
// filter
@@ -80,13 +80,13 @@ func FindDeploymentReplicasSmallerThanN(cmiiEnv string, replicasMin int32) (depl
func FindDeploymentNotHealthy(cmiiEnv string) (deploymentList []CmiiDeploymentInterface) {
// all unhealthy pods
allInterface := CmiiOperator.PodAllInterface(cmiiEnv)
allInterface := DefaultCmiiOperator.PodAllInterface(cmiiEnv)
// find the deployments
for _, podInterface := range allInterface {
if !podInterface.PodStatus {
// unhealthy pod
deploymentInterface := CmiiOperator.DeploymentOneInterface(cmiiEnv, podInterface.ContainerName)
deploymentInterface := DefaultCmiiOperator.DeploymentOneInterface(cmiiEnv, podInterface.ContainerName)
if deploymentInterface != nil {
deploymentList = append(deploymentList, *deploymentInterface)
}
@@ -98,10 +98,10 @@ func FindDeploymentNotHealthy(cmiiEnv string) (deploymentList []CmiiDeploymentIn
func FindAllNodeNotHealthy() (nodeList []CmiiNodeInterface) {
// dev-cluster
devNodeList := CmiiOperator.NodeAllInterface("dev")
devNodeList := DefaultCmiiOperator.NodeAllInterface("dev")
// core-cluster
coreNodeList := CmiiOperator.NodeAllInterface("uat")
coreNodeList := DefaultCmiiOperator.NodeAllInterface("uat")
// append
coreNodeList = append(coreNodeList, devNodeList...)
@@ -129,7 +129,7 @@ func FindAllNodeNotHealthy() (nodeList []CmiiNodeInterface) {
func FindPodNotHealthy(cmiiEnv string) (podList []CmiiPodInterface) {
// all unhealthy pods
allInterface := CmiiOperator.PodAllInterface(cmiiEnv)
allInterface := DefaultCmiiOperator.PodAllInterface(cmiiEnv)
// find the deployments
for _, podInterface := range allInterface {
@@ -145,7 +145,7 @@ func FindPodNotHealthy(cmiiEnv string) (podList []CmiiPodInterface) {
func GetDeploymentGitInfoFromInnerEnv(cmiiEnv, appName string) (gitBranch, gitCommit string) {
// get app
podList := CmiiOperator.PodByAppName(cmiiEnv, appName)
podList := DefaultCmiiOperator.PodByAppName(cmiiEnv, appName)
// get pod
if len(podList) == 0 {
@@ -154,7 +154,7 @@ func GetDeploymentGitInfoFromInnerEnv(cmiiEnv, appName string) (gitBranch, gitCo
}
// exec env
stdout, stderr := CmiiOperator.PodExec(cmiiEnv, podList[0], []string{"env"})
stdout, stderr := DefaultCmiiOperator.PodExec(cmiiEnv, podList[0], []string{"env"})
errLog := stderr.String()
if errLog != "" {
@@ -182,7 +182,7 @@ func GetDeploymentGitInfoFromInnerEnv(cmiiEnv, appName string) (gitBranch, gitCo
func FindCmiiMiddlewarePodInterface(cmiiEnv string) (podList []CmiiPodInterface) {
cmiiPodInterfaces := CmiiOperator.PodAllInterface(cmiiEnv)
cmiiPodInterfaces := DefaultCmiiOperator.PodAllInterface(cmiiEnv)
for _, podInterface := range cmiiPodInterfaces {
for key := range CmiiMiddlewareNameMap {
@@ -219,9 +219,9 @@ func ScaleDeploymentToDesiredReplicasFromMap(cmiiEnv string, nameReplicasMap map
// iterate over the name->replicas map
for appName, replica := range nameReplicasMap {
exists := CmiiOperator.DeploymentExist(cmiiEnv, appName)
exists := DefaultCmiiOperator.DeploymentExist(cmiiEnv, appName)
if exists != nil {
scale := CmiiOperator.DeploymentScale(cmiiEnv, appName, replica)
scale := DefaultCmiiOperator.DeploymentScale(cmiiEnv, appName, replica)
if !scale {
errorUpdateMap[appName] = replica
}
@@ -238,12 +238,12 @@ func RestartDeploymentFromList(deploymentList []CmiiDeploymentInterface) bool {
result := true
for _, deployment := range deploymentList {
result = CmiiOperator.DeploymentScale(deployment.Namespace, deployment.Name, 0)
result = DefaultCmiiOperator.DeploymentScale(deployment.Namespace, deployment.Name, 0)
if !result {
return result
}
time.Sleep(time.Second)
result = CmiiOperator.DeploymentScale(deployment.Namespace, deployment.Name, deployment.Replicas)
result = DefaultCmiiOperator.DeploymentScale(deployment.Namespace, deployment.Name, deployment.Replicas)
if !result {
return result
}
@@ -254,11 +254,11 @@ func RestartDeploymentFromList(deploymentList []CmiiDeploymentInterface) bool {
func RestartCmiiBackendDeployment(cmiiEnv string) {
cmiiDeploymentInterfaces := CmiiOperator.DeploymentAllInterface(cmiiEnv)
cmiiDeploymentInterfaces := DefaultCmiiOperator.DeploymentAllInterface(cmiiEnv)
for _, deploymentInterface := range cmiiDeploymentInterfaces {
if AppNameBelongsToCmiiImage(deploymentInterface.Name) {
if !CmiiOperator.DeploymentRestart(deploymentInterface.Namespace, deploymentInterface.Name) {
if !DefaultCmiiOperator.DeploymentRestart(deploymentInterface.Namespace, deploymentInterface.Name) {
log.ErrorF("[RestartCmiiBackendDeployment] - restart of [%s] [%s] failed !", deploymentInterface.Namespace, deploymentInterface.Name)
} else {
log.DebugF("[RestartCmiiBackendDeployment] - restart of [%s] [%s] success !", deploymentInterface.Namespace, deploymentInterface.Name)
@@ -267,17 +267,17 @@ func RestartCmiiBackendDeployment(cmiiEnv string) {
}
}
log.InfoF("[RestartCmiiBackendDeployment] - restart of all backend app in [%s] success !", CmiiOperator.CurrentNamespace)
log.InfoF("[RestartCmiiBackendDeployment] - restart of all backend app in [%s] success !", DefaultCmiiOperator.CurrentNamespace)
}
func RestartCmiiFrontendDeployment(cmiiEnv string) {
cmiiDeploymentInterfaces := CmiiOperator.DeploymentAllInterface(cmiiEnv)
cmiiDeploymentInterfaces := DefaultCmiiOperator.DeploymentAllInterface(cmiiEnv)
for _, deploymentInterface := range cmiiDeploymentInterfaces {
_, ok := CmiiFrontendAppMap[deploymentInterface.Name]
if ok {
if !CmiiOperator.DeploymentRestart(deploymentInterface.Namespace, deploymentInterface.Name) {
if !DefaultCmiiOperator.DeploymentRestart(deploymentInterface.Namespace, deploymentInterface.Name) {
log.ErrorF("[RestartCmiiFrontendDeployment] - restart of [%s] [%s] failed !", deploymentInterface.Namespace, deploymentInterface.Name)
} else {
log.DebugF("[RestartCmiiFrontendDeployment] - restart of [%s] [%s] success !", deploymentInterface.Namespace, deploymentInterface.Name)
@@ -285,38 +285,39 @@ func RestartCmiiFrontendDeployment(cmiiEnv string) {
}
}
log.InfoF("[RestartCmiiFrontendDeployment] - restart of all backend app in [%s] success !", CmiiOperator.CurrentNamespace)
log.InfoF("[RestartCmiiFrontendDeployment] - restart of all backend app in [%s] success !", DefaultCmiiOperator.CurrentNamespace)
}
func UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag string) bool {
func UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag string) (updateOK bool, oldImageTag, newImageTag string) {
cmiiDeploymentInterface := CmiiOperator.DeploymentOneInterface(cmiiEnv, appName)
cmiiDeploymentInterface := DefaultCmiiOperator.DeploymentOneInterface(cmiiEnv, appName)
if cmiiDeploymentInterface == nil {
return false
return updateOK, oldImageTag, newImageTag
}
// check if need to update
if cmiiDeploymentInterface.ImageTag == newTag {
oldImageTag = cmiiDeploymentInterface.ImageTag
if oldImageTag == newTag {
log.DebugF("[UpdateCmiiDeploymentImageTag] - [%s] [%s] image tag are the same ! no need to update !", cmiiEnv, appName)
// restart
if CmiiOperator.DeploymentRestart(cmiiEnv, appName) {
return true
if DefaultCmiiOperator.DeploymentRestart(cmiiEnv, appName) {
return true, oldImageTag, oldImageTag
} else {
return false
return false, oldImageTag, oldImageTag
}
}
content := utils.WordSpaceCompletion(utils.TimeSplitFormatString()+" "+cmiiDeploymentInterface.Namespace, 35)
content = utils.WordSpaceCompletion(content+cmiiDeploymentInterface.Name, 75)
content = utils.WordSpaceCompletion(content+cmiiDeploymentInterface.ImageTag, 105)
content = utils.WordSpaceCompletion(content+oldImageTag, 105)
content = content + newTag + "\n"
log.DebugF("[UpdateCmiiDeploymentImageTag] - prepare to update [%s]!", content)
// update
tag := CmiiOperator.DeploymentUpdateTag(cmiiDeploymentInterface.Namespace, cmiiDeploymentInterface.Name, newTag)
tag := DefaultCmiiOperator.DeploymentUpdateTag(cmiiDeploymentInterface.Namespace, cmiiDeploymentInterface.Name, newTag)
if !tag {
log.ErrorF("[UpdateCmiiDeploymentImageTag] - [%s] update failed !", content)
return false
return false, oldImageTag, newImageTag
}
// append log
@@ -324,15 +325,13 @@ func UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag string) bool {
// re-get from env
time.Sleep(time.Second)
deploy := CmiiOperator.DeploymentOneInterface(cmiiEnv, appName)
deploy := DefaultCmiiOperator.DeploymentOneInterface(cmiiEnv, appName)
if deploy == nil {
log.ErrorF("[UpdateCmiiDeploymentImageTag] - unknown error happened ! [%s] [%s] not exists !", cmiiEnv, appName)
return false
return false, oldImageTag, newImageTag
}
// log
//log.InfoF("[UpdateCmiiDeploymentImageTag] - real image tag are [%s] update tag [%s] success ! ", deploy.Image, content)
return true
return true, oldImageTag, deploy.ImageTag
}
func UpdateCmiiImageTagFromNameTagMap(cmiiEnv string, nameTagMap map[string]string) (result map[string]string) {
@@ -340,12 +339,12 @@ func UpdateCmiiImageTagFromNameTagMap(cmiiEnv string, nameTagMap map[string]stri
result = make(map[string]string, len(nameTagMap))
for appName, newTag := range nameTagMap {
if AppNameBelongsToCmiiImage(appName) {
if UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag) {
ok, oldImageTag, newImageTag := UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag)
if ok {
log.InfoF("[UpdateCmiiImageTagFromNameTagMap] - %s %s to %s", cmiiEnv, appName, newTag)
result[appName] = newTag
result[appName] = newImageTag
} else {
result[appName] = "false"
result[appName] = oldImageTag
}
}
}
@@ -396,23 +395,23 @@ func RollBackCmiiDeploymentFromUpdateLog(updateLog string) bool {
}
log.InfoF("[RollBackCmiiDeploymentFromUpdateLog] - rollback [%s] [%s] from [%s] to [%s]", cmiiEnv, appName, newTag, fromTag)
rollback := UpdateCmiiDeploymentImageTag(cmiiEnv, appName, fromTag)
ok, _, _ := UpdateCmiiDeploymentImageTag(cmiiEnv, appName, fromTag)
return rollback
return ok
}
// BackupAllDeploymentFromEnv extracts all the CMII apps from DEMO
func BackupAllDeploymentFromEnv(cmiiEnv string) bool {
allInterface := CmiiOperator.DeploymentAllInterface(cmiiEnv)
allInterface := DefaultCmiiOperator.DeploymentAllInterface(cmiiEnv)
// must filter
allInterface = FilterAllCmiiAppSoft(allInterface)
//filePath := "C:\\Users\\wddsh\\Documents\\IdeaProjects\\ProjectOctopus\\cmii_operator\\log\\all-" + CmiiOperator.CurrentNamespace + "-" + utils.TimeSplitFormatString() + ".txt"
filePath := "/home/wdd/IdeaProjects/ProjectOctopus/agent-operator/log/all-" + CmiiOperator.CurrentNamespace + "-" + utils.TimeSplitFormatString() + ".txt"
//filePath := "C:\\Users\\wddsh\\Documents\\IdeaProjects\\ProjectOctopus\\cmii_operator\\log\\all-" + DefaultCmiiOperator.CurrentNamespace + "-" + utils.TimeSplitFormatString() + ".txt"
filePath := "/home/wdd/IdeaProjects/ProjectOctopus/agent-operator/log/all-" + DefaultCmiiOperator.CurrentNamespace + "-" + utils.TimeSplitFormatString() + ".txt"
log.InfoF("[BackupAllDeploymentFromEnv] - backup all image from %s => %s", CmiiOperator.CurrentNamespace, filePath)
log.InfoF("[BackupAllDeploymentFromEnv] - backup all image from %s => %s", DefaultCmiiOperator.CurrentNamespace, filePath)
firstCol := 0
secondCol := 0
@@ -462,7 +461,7 @@ func BackupAllDeploymentFromEnv(cmiiEnv string) bool {
// BackupAllCmiiDeploymentToMap backs up every image name in the DEMO environment into maps, covering SRS, frontend and backend
func BackupAllCmiiDeploymentToMap(cmiiEnv string) (backendMap, frontendMap, srsMap map[string]string) {
allInterface := CmiiOperator.DeploymentAllInterface(cmiiEnv)
allInterface := DefaultCmiiOperator.DeploymentAllInterface(cmiiEnv)
allInterface = FilterAllCmiiAppSoft(allInterface)
backendMap = make(map[string]string, len(allInterface))
@@ -481,7 +480,7 @@ func BackupAllCmiiDeploymentToMap(cmiiEnv string) (backendMap, frontendMap, srsM
for key, value := range CmiiSrsAppMap {
var app *CmiiDeploymentInterface
if strings.Contains(value, "deployment") {
app = CmiiOperator.DeploymentOneInterface(cmiiEnv, key)
app = DefaultCmiiOperator.DeploymentOneInterface(cmiiEnv, key)
if app != nil {
for _, imageName := range app.ContainerImageMap {
split := strings.Split(imageName, ":")
@@ -492,7 +491,7 @@ func BackupAllCmiiDeploymentToMap(cmiiEnv string) (backendMap, frontendMap, srsM
}
}
} else if strings.Contains(value, "state") {
app = CmiiOperator.StatefulSetOneInterface(cmiiEnv, key)
app = DefaultCmiiOperator.StatefulSetOneInterface(cmiiEnv, key)
if app != nil {
for _, imageName := range app.ContainerImageMap {
split := strings.Split(imageName, ":")
@@ -531,9 +530,9 @@ func BackupAllCmiiDeploymentToList(cmiiEnv string, completePrefix bool) (allCmii
func BackUpAllCmiiAppImageNameFromEnv(cmiiEnv string) {
CmiiOperator.changeOperatorEnv(cmiiEnv)
//filePath := "C:\\Users\\wddsh\\Documents\\IdeaProjects\\ProjectOctopus\\cmii_operator\\log\\images-" + CmiiOperator.CurrentNamespace + "-" + utils.TimeSplitFormatString() + ".txt"
filePath := "/home/wdd/IdeaProjects/ProjectOctopus/agent-operator/log/images-" + CmiiOperator.CurrentNamespace + "-" + utils.TimeSplitFormatString() + ".txt"
DefaultCmiiOperator.changeOperatorEnv(cmiiEnv)
//filePath := "C:\\Users\\wddsh\\Documents\\IdeaProjects\\ProjectOctopus\\cmii_operator\\log\\images-" + DefaultCmiiOperator.CurrentNamespace + "-" + utils.TimeSplitFormatString() + ".txt"
filePath := "/home/wdd/IdeaProjects/ProjectOctopus/agent-operator/log/images-" + DefaultCmiiOperator.CurrentNamespace + "-" + utils.TimeSplitFormatString() + ".txt"
only := make(map[string]string, 150)
// front
@@ -541,7 +540,7 @@ func BackUpAllCmiiAppImageNameFromEnv(cmiiEnv string) {
for key, value := range CmiiFrontendAppMap {
_, ok := only[key]
if !ok {
deploy := CmiiOperator.DeploymentOneInterface(cmiiEnv, key)
deploy := DefaultCmiiOperator.DeploymentOneInterface(cmiiEnv, key)
if deploy != nil {
only[key] = value
utils.AppendContentToFile(deploy.Image+"\n", filePath)
@@ -552,7 +551,7 @@ func BackUpAllCmiiAppImageNameFromEnv(cmiiEnv string) {
for key, value := range CmiiBackendAppMap {
_, ok := only[key]
if !ok {
deploy := CmiiOperator.DeploymentOneInterface(cmiiEnv, key)
deploy := DefaultCmiiOperator.DeploymentOneInterface(cmiiEnv, key)
if deploy != nil {
only[key] = value
utils.AppendContentToFile(deploy.Image+"\n", filePath)
@@ -565,7 +564,7 @@ func BackUpAllCmiiAppImageNameFromEnv(cmiiEnv string) {
for key, value := range CmiiGISAppMap {
_, ok := only[key]
if !ok {
deploy := CmiiOperator.DeploymentOneInterface(cmiiEnv, key)
deploy := DefaultCmiiOperator.DeploymentOneInterface(cmiiEnv, key)
if deploy != nil {
only[key] = value
utils.AppendContentToFile(deploy.Image+"\n", filePath)
@@ -579,13 +578,13 @@ func BackUpAllCmiiAppImageNameFromEnv(cmiiEnv string) {
if !ok {
var app *CmiiDeploymentInterface
if strings.Contains(value, "deployment") {
app = CmiiOperator.DeploymentOneInterface(cmiiEnv, key)
app = DefaultCmiiOperator.DeploymentOneInterface(cmiiEnv, key)
if app != nil {
only[key] = value
utils.AppendContentToFile(app.Image+"\n", filePath)
}
} else if strings.Contains(value, "state") {
app = CmiiOperator.StatefulSetOneInterface(cmiiEnv, key)
app = DefaultCmiiOperator.StatefulSetOneInterface(cmiiEnv, key)
if app != nil {
only[key] = value
for _, imageName := range app.ContainerImageMap {

View File

@@ -2,9 +2,11 @@ package main
import (
"fmt"
"strconv"
"testing"
"time"
"wdd.io/agent-common/assert"
"wdd.io/agent-common/pusher"
"wdd.io/agent-common/utils"
)
@@ -109,7 +111,7 @@ func TestFindPodNotHealthy_And_Delete(t *testing.T) {
for _, podInterface := range podNotHealthy {
t.Logf("[%s] [%s]", podInterface.Name, podInterface.PodPhase)
podDelete := CmiiOperator.PodDelete(podInterface.Namespace, podInterface.Name)
podDelete := DefaultCmiiOperator.PodDelete(podInterface.Namespace, podInterface.Name)
assert.Equal(t, podDelete, true, "delete of ", podInterface.Namespace, podInterface.Name, " failed !")
}
@@ -117,12 +119,12 @@ func TestFindPodNotHealthy_And_Delete(t *testing.T) {
}
func TestFilterAllCmiiAppStrict(t *testing.T) {
allInterface := CmiiOperator.DeploymentAllInterface("devflight")
allInterface := DefaultCmiiOperator.DeploymentAllInterface("devflight")
FilterAllCmiiAppStrict(allInterface)
}
func TestRestartDeploymentFromList(t *testing.T) {
allInterface := CmiiOperator.DeploymentAllInterface("devflight")
allInterface := DefaultCmiiOperator.DeploymentAllInterface("devflight")
allInterface = FilterAllCmiiAppSoft(allInterface)
RestartDeploymentFromList(allInterface)
@@ -239,10 +241,10 @@ func TestRestartCmiiDeployment(t *testing.T) {
cmiiEnv := integration
appName := "cmii-uav-platform"
kill := CmiiOperator.DeploymentRestartByKill(cmiiEnv, appName)
kill := DefaultCmiiOperator.DeploymentRestartByKill(cmiiEnv, appName)
assert.Equal(t, kill, true, "have unhealthy pod !")
check := CmiiOperator.DeploymentStatusCheck(cmiiEnv, appName, 180)
check := DefaultCmiiOperator.DeploymentStatusCheck(cmiiEnv, appName, 180)
assert.Equal(t, check, true, "DeploymentStatusCheck failed !")
}
@@ -250,7 +252,7 @@ func TestUpdateCmiiDeploymentImageTag(t *testing.T) {
// compute today's scheduled update time
now := time.Now()
targetTime := time.Date(now.Year(), now.Month(), now.Day(), 17, 57, 00, 0, now.Location())
targetTime := time.Date(now.Year(), now.Month(), now.Day(), 17, 45, 00, 0, now.Location())
duration := time.Duration(0)
@@ -272,18 +274,30 @@ func TestUpdateCmiiDeploymentImageTag(t *testing.T) {
//newTag := "5.4.0-032601"
appNameTagMap := map[string]string{
"cmii-uav-platform-media": "5.4.0",
//"cmii-uav-multilink": "5.5.0",
"cmii-uav-data-post-process": "5.5.0-042501",
}
for appName, newTag := range appNameTagMap {
tag := UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag)
assert.Equal(t, tag, true, "update image tag failed !")
ok, oldImageTag, newImageTag := UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag)
assert.Equal(t, ok, true, "update image tag failed !")
utils.SplitLinePrint()
check := CmiiOperator.DeploymentStatusCheck(cmiiEnv, appName, 300)
check := DefaultCmiiOperator.DeploymentStatusCheck(cmiiEnv, appName, 300)
assert.Equal(t, check, true, "deployment run failed!")
deploy := DefaultCmiiOperator.DeploymentOneInterface(cmiiEnv, appName)
// push message
message := pusher.CmiiUpdateMessage{
Namespace: cmiiEnv,
AppName: appName,
FromTag: oldImageTag,
ToTag: newImageTag,
Replicas: strconv.FormatInt(int64(deploy.Replicas), 10),
DeployStatus: check,
}
message.SendMessage()
}

View File

@@ -198,7 +198,7 @@ func (op *MinioOperator) UploadFile(bucketNameWithSuffix, filePath, fileName str
// upload the file with PutObject
// fileName ==> tmp/123/123.txt
// realFileName ==> 123.txt
log.InfoF("[UploadFile] - upload from [%s] to [%s]", filePath+realFileName, op.MinioEndpoint+"/"+bucketNameWithSuffix+"/"+fileName)
log.InfoF("[UploadFile] - upload from [%s] to [%s]", filePath+realFileName, op.MinioEndpoint+"/"+bucketNameWithSuffix+fileName)
n, err := op.Client.FPutObject(bucketNameWithSuffix, fileName, filePath+realFileName, minio.PutObjectOptions{})
if err != nil {
log.ErrorF("[UploadFile] - upload [%s] to [%s] error %s", filePath+realFileName, op.MinioEndpoint+"/"+bucketNameWithSuffix+"/"+fileName, err.Error())

View File

@@ -1,12 +1,8 @@
package main
import (
"bufio"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"slices"
"strings"
@@ -15,7 +11,6 @@ import (
"wdd.io/agent-operator/image"
)
const OfflineImageGzipFolderPrefix = "/root/octopus_image/"
const OfflineDeployHarborHost = "harbor.wdd.io"
const PublicDeployHarborHost = "42.192.52.227"
const DirectPushDeployHarborHost = "36.134.71.138"
@@ -60,22 +55,23 @@ func (sync ImageSyncEntity) PullFromEntityAndSyncConditionally() (imageSyncResul
// compress
if sync.ProjectVersion != "" {
// get version images
// fetch the images of a specific version
errorPullImageList, errorGzipImageList, allCmiiImageNameList, allGzipFileNameList = DownloadCompressUploadFromVersion(sync.ProjectVersion, sync.CompressImageToGzip, sync.UploadToDemoMinio)
gzipFolderFullPath = OfflineImageGzipFolderPrefix + sync.ProjectVersion
gzipFolderFullPath = image.OfflineImageGzipFolderPrefix + sync.ProjectVersion
} else {
// get demo images
// fetch the images currently deployed in DEMO
errorPullImageList, errorGzipImageList, allCmiiImageNameList, allGzipFileNameList = DownloadCompressUploadFromDemo(sync.ProjectName, sync.CompressImageToGzip, sync.UploadToDemoMinio)
gzipFolderFullPath = OfflineImageGzipFolderPrefix + sync.ProjectName
gzipFolderFullPath = image.OfflineImageGzipFolderPrefix + sync.ProjectName
}
} else {
// pull the specified images
gzipFolderFullPath = OfflineImageGzipFolderPrefix + "tmp"
gzipFolderFullPath = image.OfflineImageGzipFolderPrefix + "tmp"
// assemble the image names
allCmiiImageNameList = concatAndUniformCmiiImage(sync.FullNameImageList, sync.CmiiNameTagList)
@@ -148,9 +144,13 @@ func DownloadCompressUpload(fullNameList []string, shouldGzip bool, gzipFolderFu
log.Info("COMPRESS START")
for _, imageFullName := range fullNameList {
if !image.SaveToTarGZ(imageFullName, gzipFolderFullPath) {
ok, gzipImageFileFullPath := image.SaveToGzipFile(imageFullName, gzipFolderFullPath)
if !ok {
errorGzipImageList = append(errorGzipImageList, imageFullName)
continue
}
// compression succeeded
allGzipFileNameList = append(allGzipFileNameList, gzipImageFileFullPath)
}
// remove failed
fullNameList = slices.DeleteFunc(fullNameList, func(imageName string) bool {
@@ -163,29 +163,17 @@ func DownloadCompressUpload(fullNameList []string, shouldGzip bool, gzipFolderFu
//uploadGzipFileToDemoMinio()
// get gzip file name list
log.Info("UPLOAD OSS START !")
err := filepath.WalkDir(gzipFolderFullPath, func(path string, d fs.DirEntry, err error) error {
if err != nil {
log.ErrorF("error getting gzip file name list 1! %s", err.Error())
}
if !d.IsDir() {
allGzipFileNameList = append(allGzipFileNameList, d.Name())
}
return nil
})
if err != nil {
log.ErrorF("error getting gzip file name list 2! %s", err.Error())
}
// start to upload
// extract demo oss location suffix from gzipFolderFullPath
trimPrefix := strings.TrimPrefix(gzipFolderFullPath, OfflineImageGzipFolderPrefix)
bucketName := "cmlc-installation/" + trimPrefix
log.InfoF("gzip file location in demo oss is %s", DefaultDemoEndpoint+"/"+bucketName)
trimPrefix := strings.TrimPrefix(gzipFolderFullPath, image.OfflineImageGzipFolderPrefix)
bucketNameWithPrefix := "cmlc-installation/" + trimPrefix
log.InfoF("gzip file location in demo oss is %s", DefaultDemoEndpoint+"/"+bucketNameWithPrefix)
minioOperator := CmiiMinioOperator{}
for _, gzipFileName := range allGzipFileNameList {
if !minioOperator.UploadToDemo(bucketName, gzipFolderFullPath, gzipFileName) {
// SaveToGzipFile returns a full path; normalize it back to a bare gzip file name
gzipFileName = strings.TrimPrefix(gzipFileName, gzipFolderFullPath)
if !DefaultCmiiMinioOperator.UploadToDemo(bucketNameWithPrefix, gzipFolderFullPath, gzipFileName) {
log.ErrorF("upload of %s to demo oss error !", gzipFolderFullPath+gzipFileName)
}
}
@@ -195,64 +183,79 @@ func DownloadCompressUpload(fullNameList []string, shouldGzip bool, gzipFolderFu
return errorPullImageList, errorGzipImageList, fullNameList, allGzipFileNameList
}
// DownloadLoadTagPush DLTU procedure - the other half of the ImageSync flow; must support the bastion-host (fully offline) mode
// DownloadLoadTagUpload DLTU procedure - the other half of the ImageSync flow; must support the bastion-host (fully offline) mode
// 2. gzip file directories: the three RKE, MIDDLE and CMII directories - fixed by convention
// convention: /root/wdd/image/rke/ /root/wdd/image/middle/ /root/wdd/image/cmii/
// 3. read the local IP address - passed as a parameter
// 4. OSS address - when ossUrlPrefix is empty, the default value is used
// 5. ossFileName - a .txt suffix means a file list; a .tar.gz suffix means a gzip folder layout
func DownloadLoadTagPush(downloadFromOss bool, ossUrlPrefix, ossFileName, localGzipFolder string, targetHarborFullName string) []string {
func DownloadLoadTagUpload(downloadFromOss bool, ossUrlPrefix, ossFileName, localGzipFolderOrGzipFile string, targetHarborFullName string) (targetImageFullNameList []string) {
// single-file mode is supported
if !utils.IsFileOrDir(localGzipFolderOrGzipFile) {
// a single compressed file: load it, tag it, upload it, and return
if !strings.HasSuffix(localGzipFolderOrGzipFile, ".tar.gz") {
log.ErrorF("local gzip file %s is not a .tar.gz file !", localGzipFolderOrGzipFile)
return nil
}
image.LoadFromGzipFilePath(localGzipFolderOrGzipFile)
gzipFileName := localGzipFolderOrGzipFile[strings.LastIndex(localGzipFolderOrGzipFile, string(os.PathSeparator))+1:]
imageFullName := image2.GzipFileNameToImageFullName(gzipFileName)
if imageFullName == "" {
return nil
}
targetImageFullName := image2.ImageNameToTargetImageFullName(imageFullName, targetHarborFullName)
image.TagFromSourceToTarget(imageFullName, targetImageFullName)
if image.UploadToHarbor(targetImageFullName) {
targetImageFullNameList = append(targetImageFullNameList, targetImageFullName)
}
return targetImageFullNameList
}
separator := os.PathSeparator
if !strings.HasSuffix(localGzipFolder, string(separator)) {
localGzipFolder += string(separator)
if !strings.HasSuffix(localGzipFolderOrGzipFile, string(separator)) {
localGzipFolderOrGzipFile += string(separator)
}
// download
if downloadFromOss {
if !parseAndDownloadFromOss(ossUrlPrefix, ossFileName, localGzipFolder) {
if !parseAndDownloadFromOss(ossUrlPrefix, ossFileName, localGzipFolderOrGzipFile) {
log.ErrorF("download from oss error !")
return nil
}
}
// load
loadAllGzipImageFromLocalFolder(localGzipFolder)
image.LoadFromFolderPath(localGzipFolder)
loadAllGzipImageFromLocalFolder(localGzipFolderOrGzipFile)
// tag
// push
allFileInFolder, err := utils.ListAllFileInFolder(localGzipFolder)
allFileInFolder, err := utils.ListAllFileInFolder(localGzipFolderOrGzipFile)
if err != nil {
return nil
}
for _, gzipFileName := range allFileInFolder {
// skip files that do not end in .tar.gz
if !strings.HasSuffix(gzipFileName, ".tar.gz") {
continue
}
log.DebugF("gzip file name is %s", gzipFileName)
// gzip file name to image full name: recover the original image name
imageFullName := image2.GzipFileNameToImageFullName(gzipFileName)
if imageFullName == "" {
log.ErrorF("gzip file %s to image full name error !", gzipFileName)
continue
}
// build the target name, then re-tag
targetImageFullName := image2.ImageNameToTargetImageFullName(imageFullName, targetHarborFullName)
// tag
image.TagFromSourceToTarget(imageFullName, targetImageFullName)
//push
pushResult := image.PushToOctopusKindHarbor(targetImageFullName)
defer pushResult.Close()
scanner := bufio.NewScanner(pushResult)
for scanner.Scan() {
// upload to the target Harbor; record only the images that actually made it
if image.UploadToHarbor(targetImageFullName) {
targetImageFullNameList = append(targetImageFullNameList, targetImageFullName)
log.InfoF("%s to %s push success !", gzipFileName, targetImageFullName)
} else {
log.ErrorF("upload to harbor error of %s", targetImageFullName)
}
}
return nil
return targetImageFullNameList
}
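A hypothetical invocation of the DLTU procedure under the conventions from the doc comment (paths and the Harbor address are illustrative, borrowed from the deploy config later in this commit):
// offline bastion host: load the conventional CMII folder and push to the on-site Harbor
pushed := DownloadLoadTagUpload(false, "", "", "/root/wdd/image/cmii/", "10.100.2.121:8033")
log.InfoF("pushed %d images", len(pushed))
// single-file mode: one .tar.gz, no OSS download
_ = DownloadLoadTagUpload(false, "", "", "/root/wdd/image/cmii/docker=library=busybox=latest.tar.gz", "10.100.2.121:8033")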
func loadAllGzipImageFromLocalFolder(localGzipFolder string) {
image.LoadFromFolderPath(localGzipFolder)
}
func parseAndDownloadFromOss(ossUrlPrefix, ossFileName, localGzipFolder string) bool {
@@ -291,10 +294,10 @@ func parseAndDownloadFromOss(ossUrlPrefix, ossFileName, localGzipFolder string)
func DownloadCompressUploadFromDemo(projectName string, shouldGzip bool, shouldOss bool) (errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList []string) {
// generate a project folder
err := os.MkdirAll(OfflineImageGzipFolderPrefix+projectName, os.ModeDir)
err := os.MkdirAll(image.OfflineImageGzipFolderPrefix+projectName, 0755) // os.ModeDir carries no permission bits
if err != nil {
if !errors.Is(err, os.ErrExist) {
log.ErrorF("[Download_Compress_Upload_From_Demo] - create folder of %s error %s", OfflineImageGzipFolderPrefix+projectName, err.Error())
log.ErrorF("[Download_Compress_Upload_From_Demo] - create folder of %s error %s", image.OfflineImageGzipFolderPrefix+projectName, err.Error())
return errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList
}
}
@@ -304,7 +307,7 @@ func DownloadCompressUploadFromDemo(projectName string, shouldGzip bool, shouldO
// do work
// DCU
return DownloadCompressUpload(allCmiiImageNameListFromDemo, shouldGzip, OfflineImageGzipFolderPrefix+projectName, shouldOss)
return DownloadCompressUpload(allCmiiImageNameListFromDemo, shouldGzip, image.OfflineImageGzipFolderPrefix+projectName, shouldOss)
}
func buildAllCmiiImageNameListFromDemo(projectName string) []string {
@@ -314,9 +317,9 @@ func buildAllCmiiImageNameListFromDemo(projectName string) []string {
backendMap, frontendMap, srsMap := BackupAllCmiiDeploymentToMap(demo)
// save map to file
backendMapFile := OfflineImageGzipFolderPrefix + projectName + "-backend-app.json"
frontendMapFile := OfflineImageGzipFolderPrefix + projectName + "-frontend-app.json"
srsMapFile := OfflineImageGzipFolderPrefix + projectName + "-srs-app.json"
backendMapFile := image.OfflineImageGzipFolderPrefix + projectName + "-backend-app.json"
frontendMapFile := image.OfflineImageGzipFolderPrefix + projectName + "-frontend-app.json"
srsMapFile := image.OfflineImageGzipFolderPrefix + projectName + "-srs-app.json"
_ = os.Remove(backendMapFile)
_ = os.Remove(frontendMapFile)
_ = os.Remove(srsMapFile)
@@ -334,9 +337,9 @@ func buildAllCmiiImageNameListFromDemo(projectName string) []string {
// srsMapFile,
//)
realCmiiImageName = append(realCmiiImageName, image.ConvertCMiiImageMapToList(backendMap)...)
realCmiiImageName = append(realCmiiImageName, image.ConvertCMiiImageMapToList(frontendMap)...)
realCmiiImageName = append(realCmiiImageName, image.ConvertCMiiImageMapToList(srsMap)...)
realCmiiImageName = append(realCmiiImageName, image.CmiiImageMapToFullNameList(backendMap)...)
realCmiiImageName = append(realCmiiImageName, image.CmiiImageMapToFullNameList(frontendMap)...)
realCmiiImageName = append(realCmiiImageName, image.CmiiImageMapToFullNameList(srsMap)...)
utils.BeautifulPrintListWithTitle(realCmiiImageName, "Cmii Project Image => "+projectName)
@@ -347,10 +350,10 @@ func buildAllCmiiImageNameListFromDemo(projectName string) []string {
func DownloadCompressUploadFromVersion(cmiiVersion string, shouldGzip bool, shouldOss bool) (errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList []string) {
// generate a project folder
err := os.MkdirAll(OfflineImageGzipFolderPrefix+cmiiVersion, os.ModeDir)
err := os.MkdirAll(image.OfflineImageGzipFolderPrefix+cmiiVersion, 0755) // os.ModeDir carries no permission bits
if err != nil {
if !errors.Is(err, os.ErrExist) {
log.ErrorF("[Download_Compress_Upload_From_Demo] - create folder of %s error %s", OfflineImageGzipFolderPrefix+cmiiVersion, err.Error())
log.ErrorF("[Download_Compress_Upload_From_Demo] - create folder of %s error %s", image.OfflineImageGzipFolderPrefix+cmiiVersion, err.Error())
return errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList
}
}
@@ -360,7 +363,7 @@ func DownloadCompressUploadFromVersion(cmiiVersion string, shouldGzip bool, shou
// do work
// DCU procedure
return DownloadCompressUpload(realCmiiImageName, shouldGzip, OfflineImageGzipFolderPrefix+cmiiVersion, shouldOss)
return DownloadCompressUpload(realCmiiImageName, shouldGzip, image.OfflineImageGzipFolderPrefix+cmiiVersion, shouldOss)
}
@@ -378,18 +381,18 @@ func buildAllCmiiImageNameListFromVersion(cmiiVersion string) []string {
frontendMap[app] = cmiiVersion
}
realCmiiImageName = append(realCmiiImageName, image.ConvertCMiiImageMapToList(backendMap)...)
realCmiiImageName = append(realCmiiImageName, image.ConvertCMiiImageMapToList(frontendMap)...)
realCmiiImageName = append(realCmiiImageName, image.CmiiImageMapToFullNameList(backendMap)...)
realCmiiImageName = append(realCmiiImageName, image.CmiiImageMapToFullNameList(frontendMap)...)
for key, value := range CmiiSrsAppMap {
var app *CmiiDeploymentInterface
if strings.Contains(value, "deployment") {
app = CmiiOperator.DeploymentOneInterface(demo, key)
app = DefaultCmiiOperator.DeploymentOneInterface(demo, key)
if app != nil {
realCmiiImageName = append(realCmiiImageName, app.Image)
}
} else if strings.Contains(value, "state") {
app = CmiiOperator.StatefulSetOneInterface(demo, key)
app = DefaultCmiiOperator.StatefulSetOneInterface(demo, key)
if app != nil {
for _, imageName := range app.ContainerImageMap {
realCmiiImageName = append(realCmiiImageName, imageName)
@@ -402,16 +405,16 @@ func buildAllCmiiImageNameListFromVersion(cmiiVersion string) []string {
}
func DownloadCompressUploadDependency(shouldGzip bool, shouldOss bool, downloadMiddle bool, downloadRke bool) (errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList []string) {
err := os.MkdirAll(OfflineImageGzipFolderPrefix, os.ModeDir)
err := os.MkdirAll(image.OfflineImageGzipFolderPrefix, 0755) // os.ModeDir carries no permission bits
if err != nil {
if !errors.Is(err, os.ErrExist) {
log.ErrorF("[FetchDependencyRepos] - create folder of %s error %s", OfflineImageGzipFolderPrefix, err.Error())
log.ErrorF("[FetchDependencyRepos] - create folder of %s error %s", image.OfflineImageGzipFolderPrefix, err.Error())
}
}
if downloadMiddle {
gzipFolderPrefix := OfflineImageGzipFolderPrefix + "middle/"
gzipFolderPrefix := image.OfflineImageGzipFolderPrefix + "middle/"
// remove folder first
utils.RemoveFolderComplete(gzipFolderPrefix)
@@ -420,7 +423,7 @@ func DownloadCompressUploadDependency(shouldGzip bool, shouldOss bool, downloadM
}
if downloadRke {
gzipFolderPrefix := OfflineImageGzipFolderPrefix + "rke/"
gzipFolderPrefix := image.OfflineImageGzipFolderPrefix + "rke/"
return DownloadCompressUpload(image.MiddlewareAmd64, shouldGzip, gzipFolderPrefix, shouldOss)
}
@@ -430,7 +433,7 @@ func DownloadCompressUploadDependency(shouldGzip bool, shouldOss bool, downloadM
func LoadSplitCmiiGzipImageToTargetHarbor(projectName, targetHarborHost string) (errorLoadImageNameList, errorPushImageNameList []string) {
// list folder
projectGzipFolder := OfflineImageGzipFolderPrefix + projectName
projectGzipFolder := image.OfflineImageGzipFolderPrefix + projectName
errorLoadImageNameList = append(errorLoadImageNameList, image.LoadFromFolderPath(projectGzipFolder)...)
// read from json
errorPushImageNameList = append(errorPushImageNameList, image.TagFromListAndPushToCHarbor(image.Cmii520DemoImageList, targetHarborHost)...)
@@ -446,8 +449,8 @@ func LoadSplitCmiiGzipImageToTargetHarbor(projectName, targetHarborHost string)
func LoadSplitDepGzipImageToTargetHarbor(targetHarborHost string) (errorLoadImageNameList []string, errorPushImageNameList []string) {
//middle := OfflineImageGzipFolderPrefix + "middle/"
//rke := OfflineImageGzipFolderPrefix + "rke/"
//middle := image.OfflineImageGzipFolderPrefix + "middle/"
//rke := image.OfflineImageGzipFolderPrefix + "rke/"
//errorLoadImageNameList = append(errorLoadImageNameList, ImageLoadFromFolderPath(middle)...)
//errorLoadImageNameList = append(errorLoadImageNameList, ImageLoadFromFolderPath(rke)...)

View File

@@ -38,81 +38,14 @@ func TestPullFromEntityAndSyncConditionally(t *testing.T) {
// build a mock sync object to exercise the function; adjust the mock data and expectations to your needs
sync := ImageSyncEntity{
CmiiNameTagList: []string{
"cmii-uav-tower:5.4.0-0319",
"cmii-uav-platform-logistics:5.4.0",
"cmii-uav-platform-qinghaitourism:4.1.0-21377-0508",
"cmii-uav-platform-securityh5:5.4.0",
"cmii-uav-platform:5.4.0-25263-041102",
"cmii-uav-platform-ai-brain:5.4.0",
"cmii-uav-emergency:5.3.0",
"cmii-uav-kpi-monitor:5.4.0",
"cmii-uav-platform-splice:5.4.0-040301",
"cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427",
"cmii-live-operator:5.2.0",
"cmii-uav-gateway:5.4.0",
"cmii-uav-platform-security:4.1.6",
"cmii-uav-integration:5.4.0-25916",
"cmii-uav-notice:5.4.0",
"cmii-uav-platform-open:5.4.0",
"cmii-srs-oss-adaptor:2023-SA",
"cmii-admin-gateway:5.4.0",
"cmii-uav-process:5.4.0-0410",
"cmii-suav-supervision:5.4.0-032501",
"cmii-uav-platform-cms-portal:5.4.0",
"cmii-uav-platform-multiterminal:5.4.0",
"cmii-admin-data:5.4.0-0403",
"cmii-uav-cloud-live:5.4.0",
"cmii-uav-grid-datasource:5.2.0-24810",
"cmii-uav-platform-qingdao:4.1.6-24238-qingdao",
"cmii-admin-user:5.4.0",
"cmii-uav-industrial-portfolio:5.4.0-28027-041102",
"cmii-uav-alarm:5.4.0-0409",
"cmii-uav-clusters:5.2.0",
"cmii-uav-platform-oms:5.4.0",
"cmii-uav-platform-hljtt:5.3.0-hjltt",
"cmii-uav-platform-mws:5.4.0",
"cmii-uav-autowaypoint:4.1.6-cm",
"cmii-uav-grid-manage:5.1.0",
"cmii-uav-platform-share:5.4.0",
"cmii-uav-cms:5.3.0",
"cmii-uav-oauth:5.4.0-032901",
"cmii-open-gateway:5.4.0",
"cmii-uav-data-post-process:5.4.0",
"cmii-uav-multilink:5.4.0-032701",
"cmii-uav-platform-media:5.4.0",
"cmii-uav-platform-visualization:5.2.0",
"cmii-uav-platform-emergency-rescue:5.2.0",
"cmii-app-release:4.2.0-validation",
"cmii-uav-device:5.4.0-28028-0409",
"cmii-uav-gis-server:5.4.0",
"cmii-uav-brain:5.4.0",
"cmii-uav-depotautoreturn:5.4.0",
"cmii-uav-threedsimulation:5.1.0",
"cmii-uav-grid-engine:5.1.0",
"cmii-uav-developer:5.4.0-040701",
"cmii-uav-waypoint:5.4.0-032901",
"cmii-uav-platform-base:5.4.0",
"cmii-uav-platform-threedsimulation:5.2.0-21392",
"cmii-uav-platform-detection:5.4.0",
"cmii-uav-logger:5.4.0-0319",
"cmii-uav-platform-seniclive:5.2.0",
"cmii-suav-platform-supervisionh5:5.4.0",
"cmii-uav-user:5.4.0",
"cmii-uav-surveillance:5.4.0-28028-0409",
"cmii-uav-mission:5.4.0-28028-041006",
"cmii-uav-mqtthandler:5.4.0-25916-041001",
"srs:v5.0.195",
"cmii-uav-material-warehouse:5.4.0-0407",
"cmii-uav-platform-armypeople:5.4.0-041201",
"cmii-suav-platform-supervision:5.4.0",
"cmii-uav-airspace:5.4.0-0402",
"cmii-uav-platform:5.5.0-offline",
},
FullNameImageList: nil,
ProjectVersion: "",
DirectHarborHost: "harbor.wdd.io",
CompressImageToGzip: false,
UploadToDemoMinio: false,
ShouldDirectPushToHarbor: true,
CompressImageToGzip: true,
UploadToDemoMinio: true,
ShouldDirectPushToHarbor: false,
}
// call the function and verify the returned results against expectations

View File

@@ -386,6 +386,25 @@ func (op *CmiiK8sOperator) DeploymentScale(cmiiEnv, appName string, scaleCount i
return true
}
func (op *CmiiK8sOperator) DeploymentUpdateTagByImageFullName(cmiiEnv, imageFullName string) bool {
split := strings.Split(imageFullName, ":")
// harbor
// 192.168.6.6:8033/rancher/k8s-dns-sidecar:v1.0.2
newTag := split[1]
appName := strings.Split(split[0], "/")[len(strings.Split(split[0], "/"))-1]
if strings.Contains(imageFullName, "8033") {
newTag = split[2]
appName = strings.Split(split[1], "/")[len(strings.Split(split[1], "/"))-1]
}
// extract the app name
log.DebugF("DeploymentUpdateTagByImageFullName - appName => %s, newTag => %s", appName, newTag)
return op.DeploymentUpdateTag(cmiiEnv, appName, newTag)
}
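A worked trace of the extraction above, using the address from the inline comment:
// imageFullName = "192.168.6.6:8033/rancher/k8s-dns-sidecar:v1.0.2"
// split on ":"  = ["192.168.6.6", "8033/rancher/k8s-dns-sidecar", "v1.0.2"]
// contains "8033", so newTag = split[2] = "v1.0.2"
// and appName = last "/" segment of split[1] = "k8s-dns-sidecar"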
func (op *CmiiK8sOperator) DeploymentUpdateTag(cmiiEnv, appName, newTag string) bool {
@@ -400,24 +419,46 @@ func (op *CmiiK8sOperator) DeploymentUpdateTag(cmiiEnv, appName, newTag string)
}
containers := deployment.Spec.Template.Spec.Containers
if len(containers) == 1 {
// only update this kind
if len(containers) >= 2 {
log.ErrorF("[DeploymentUpdateTag] - cant update app with 2 containers !")
return false
}
// only single-container deployments are supported
container := containers[0]
oldName := container.Image
split := strings.Split(container.Image, ":")
if strings.HasPrefix(container.Image, image2.CmiiHarborPrefix) {
// harbor
container.Image = split[0] + ":" + newTag
} else if strings.Contains(container.Image, "8033") {
// 192.168.6.6:8033/rancher/k8s-dns-sidecar:v1.0.2
// re-join the host:port prefix with the new tag
container.Image = split[0] + ":" + split[1] + ":" + newTag
}
log.DebugF("[DeploymentUpdateTag] - update [%s] [%s] from [%s] to [%s]", op.CurrentNamespace, appName, oldName, container.Image)
// update the Cmii IMAGE_VERSION and *_CONFIG_GROUP env values
tagVersion := newTag
if strings.Contains(newTag, "-") {
tagVersion = strings.Split(newTag, "-")[0]
}
envList := container.Env
// range yields copies, so mutate the slice elements by index
for i := range envList {
switch envList[i].Name {
case "IMAGE_VERSION", "BIZ_CONFIG_GROUP", "SYS_CONFIG_GROUP":
envList[i].Value = tagVersion
}
}
log.DebugF("[DeploymentUpdateTag] - update env IMAGE_VERSION to [%s]", tagVersion)
// assign back - essential, since container is a copy
deployment.Spec.Template.Spec.Containers[0] = container
// update
@@ -426,10 +467,6 @@ func (op *CmiiK8sOperator) DeploymentUpdateTag(cmiiEnv, appName, newTag string)
log.ErrorF("[DeploymentUpdateTag] - update [%s] [%s] from [%s] to [%s] error ! %s", op.CurrentNamespace, appName, split[1], container.Image, err.Error())
return false
}
} else if len(containers) == 2 {
log.ErrorF("[DeploymentUpdateTag] - cant update app with 2 containers !")
return false
}
return true
}
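For reference, the config-group version is the tag with any build suffix stripped:
// newTag = "5.4.0-032501" -> tagVersion = strings.Split(newTag, "-")[0] = "5.4.0"
// newTag = "5.5.0" -> tagVersion stays "5.5.0" (no "-" present)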

View File

@@ -12,7 +12,7 @@ import (
func TestCmiiK8sOperator_DeploymentAll(t *testing.T) {
start := time.Now()
deploymentList := CmiiOperator.DeploymentAll("devflight")
deploymentList := DefaultCmiiOperator.DeploymentAll("devflight")
elapsed := time.Since(start).Milliseconds()
fmt.Printf("执行耗时: %d ms\n", elapsed)
@@ -39,7 +39,7 @@ func TestCmiiK8sOperator_DeploymentAll(t *testing.T) {
func TestCmiiK8sOperator_DeploymentAllInterface(t *testing.T) {
start := time.Now()
deploymentList := CmiiOperator.DeploymentAllInterface("devflight")
deploymentList := DefaultCmiiOperator.DeploymentAllInterface("devflight")
elapsed := time.Since(start).Milliseconds()
fmt.Printf("执行耗时: %d ms\n", elapsed)
@@ -54,7 +54,7 @@ func TestCmiiK8sOperator_DeploymentAllInterface(t *testing.T) {
func TestCmiiK8sOperator_DeploymentFizz(t *testing.T) {
start := time.Now()
deploymentFizz := CmiiOperator.DeploymentFizz("int", "cmii-suav-supervision")
deploymentFizz := DefaultCmiiOperator.DeploymentFizz("int", "cmii-suav-supervision")
elapsed := time.Since(start).Milliseconds()
fmt.Printf("执行耗时: %d ms\n", elapsed)
@@ -75,7 +75,7 @@ func TestCmiiK8sOperator_DeploymentFizz(t *testing.T) {
func TestCmiiK8sOperator_DeploymentScale(t *testing.T) {
start := time.Now()
CmiiOperator.DeploymentScale(demo, "cmii-uav-industrial-portfolio", 1)
DefaultCmiiOperator.DeploymentScale(demo, "cmii-uav-industrial-portfolio", 1)
elapsed := time.Since(start).Milliseconds()
fmt.Printf("执行耗时: %d ms\n", elapsed)
@@ -84,7 +84,7 @@ func TestCmiiK8sOperator_DeploymentScale(t *testing.T) {
func TestCmiiK8sOperator_DeploymentUpdateTag(t *testing.T) {
start := time.Now()
CmiiOperator.DeploymentUpdateTag("demo", "cmii-uav-platform", "5.2.0-011001")
DefaultCmiiOperator.DeploymentUpdateTag("demo", "cmii-uav-platform", "5.2.0-011001")
elapsed := time.Since(start).Milliseconds()
fmt.Printf("执行耗时: %d ms\n", elapsed)
}
@@ -94,11 +94,11 @@ func TestCmiiK8sOperator_DeploymentRestart(t *testing.T) {
cmiiEnv := integration
appName := "cmii-uav-data-post-process"
CmiiOperator.DeploymentRestart(cmiiEnv, appName)
DefaultCmiiOperator.DeploymentRestart(cmiiEnv, appName)
utils.SplitLinePrint()
check := CmiiOperator.DeploymentStatusCheck(cmiiEnv, appName, 180)
check := DefaultCmiiOperator.DeploymentStatusCheck(cmiiEnv, appName, 180)
assert.Equal(t, check, true, "deployment run failed!")
}
@@ -106,19 +106,19 @@ func TestCmiiK8sOperator_DeploymentRestartByKill(t *testing.T) {
cmiiEnv := "demo"
appName := "cmii-uav-platform"
kill := CmiiOperator.DeploymentRestartByKill(cmiiEnv, appName)
kill := DefaultCmiiOperator.DeploymentRestartByKill(cmiiEnv, appName)
assert.Equal(t, kill, true, "deployment restart by kill failed !")
utils.SplitLinePrint()
check := CmiiOperator.DeploymentStatusCheck(cmiiEnv, appName, 180)
check := DefaultCmiiOperator.DeploymentStatusCheck(cmiiEnv, appName, 180)
assert.Equal(t, check, true, "deployment run failed!")
}
func TestCmiiK8sOperator_DeploymentOneInterface(t *testing.T) {
start := time.Now()
deploy := CmiiOperator.DeploymentOneInterface("devflight", "cmii-uav-depotautoreturn")
deploy := DefaultCmiiOperator.DeploymentOneInterface("devflight", "cmii-uav-depotautoreturn")
elapsed := time.Since(start).Milliseconds()
fmt.Printf("执行耗时: %d ms\n", elapsed)
@@ -130,7 +130,7 @@ func TestCmiiK8sOperator_ReplicaSetExists(t *testing.T) {
cmiiEnv := "uavcloud-devflight"
appName := "cmii-admin-data-bf8f87cb7"
exists := CmiiOperator.ReplicaSetExists(cmiiEnv, appName)
exists := DefaultCmiiOperator.ReplicaSetExists(cmiiEnv, appName)
utils.BeautifulPrint(*exists)
}
@@ -140,7 +140,7 @@ func TestCmiiK8sOperator_ReplicaSetByAppName(t *testing.T) {
cmiiEnv := "uavcloud-devflight"
appName := "cmii-admin-data"
exists := CmiiOperator.ReplicaSetByAppName(cmiiEnv, appName)
exists := DefaultCmiiOperator.ReplicaSetByAppName(cmiiEnv, appName)
for _, replicaSet := range exists {
utils.BeautifulPrint(replicaSet)
@@ -149,7 +149,7 @@ func TestCmiiK8sOperator_ReplicaSetByAppName(t *testing.T) {
func TestCmiiK8sOperator_PodAll(t *testing.T) {
start := time.Now()
podList := CmiiOperator.PodAll("devflight")
podList := DefaultCmiiOperator.PodAll("devflight")
elapsed := time.Since(start).Milliseconds()
fmt.Printf("执行耗时: %d ms\n", elapsed)
@@ -163,7 +163,7 @@ func TestCmiiK8sOperator_PodAll(t *testing.T) {
func TestCmiiK8sOperator_PodFizz(t *testing.T) {
start := time.Now()
podList := CmiiOperator.PodFizz("devflight", "cmii-uav-data-post-process")
podList := DefaultCmiiOperator.PodFizz("devflight", "cmii-uav-data-post-process")
elapsed := time.Since(start).Milliseconds()
fmt.Printf("执行耗时: %d ms\n", elapsed)
t.Logf("pod list lenght is => %d", len(podList))
@@ -183,7 +183,7 @@ func TestCmiiK8sOperator_PodByAppName(t *testing.T) {
cmiiEnv := "uat"
appName := "cmii-admin-data"
exists := CmiiOperator.PodByNodeName(cmiiEnv, appName)
exists := DefaultCmiiOperator.PodByNodeName(cmiiEnv, appName)
for _, podInterface := range exists {
utils.BeautifulPrint(podInterface)
@@ -193,7 +193,7 @@ func TestCmiiK8sOperator_PodByAppName(t *testing.T) {
func TestCmiiK8sOperator_PodFizz2(t *testing.T) {
start := time.Now()
podList := CmiiOperator.PodFizz("devflight", "notice")
podList := DefaultCmiiOperator.PodFizz("devflight", "notice")
elapsed := time.Since(start).Milliseconds()
fmt.Printf("执行耗时: %d ms\n", elapsed)
t.Logf("pod list lenght is => %d", len(podList))
@@ -215,13 +215,13 @@ func TestCmiiK8sOperator_PodByNodeName(t *testing.T) {
cmiiEnv := "uat"
nodeName := "test-03.ecs.io"
exists := CmiiOperator.PodByNodeName(cmiiEnv, nodeName)
exists := DefaultCmiiOperator.PodByNodeName(cmiiEnv, nodeName)
exists = FilterAllCmiiPodSoft(exists)
for _, podInterface := range exists {
utils.BeautifulPrint(podInterface)
if !podInterface.PodStatus {
podDelete := CmiiOperator.PodDelete(podInterface.Namespace, podInterface.Name)
podDelete := DefaultCmiiOperator.PodDelete(podInterface.Namespace, podInterface.Name)
assert.Equal(t, podDelete, true, "delete pod failed !")
}
}
@@ -229,9 +229,9 @@ func TestCmiiK8sOperator_PodByNodeName(t *testing.T) {
func TestCmiiK8sOperator_PodExec(t *testing.T) {
podList := CmiiOperator.PodByAppName(devFlight, "cmii-uav-gateway")
podList := DefaultCmiiOperator.PodByAppName(devFlight, "cmii-uav-gateway")
stdout, stderr := CmiiOperator.PodExec(devFlight, podList[0], []string{
stdout, stderr := DefaultCmiiOperator.PodExec(devFlight, podList[0], []string{
"env",
})
@@ -262,14 +262,14 @@ func TestCmiiK8sOperator_DeploymentStatusCheck(t *testing.T) {
cmiiEnv := "devflight"
appName := "cmii-uav-gateway"
-check := CmiiOperator.DeploymentStatusCheck(cmiiEnv, appName, 180)
+check := DefaultCmiiOperator.DeploymentStatusCheck(cmiiEnv, appName, 180)
assert.Equal(t, check, true, "deployment run failed!")
}
func TestCmiiK8sOperator_NodeAll(t *testing.T) {
start := time.Now()
-nodeList := CmiiOperator.NodeAll("dev")
+nodeList := DefaultCmiiOperator.NodeAll("dev")
elapsed := time.Since(start).Milliseconds()
fmt.Printf("elapsed time: %d ms\n", elapsed)
@@ -282,7 +282,7 @@ func TestCmiiK8sOperator_NodeAll(t *testing.T) {
}
func TestCmiiK8sOperator_NodeAllInterface(t *testing.T) {
-cmiiNodeInterfaces := CmiiOperator.NodeAllInterface("uat")
+cmiiNodeInterfaces := DefaultCmiiOperator.NodeAllInterface("uat")
for _, nodeInterface := range cmiiNodeInterfaces {
println()

View File

@@ -1,90 +0,0 @@
package deploy
import (
"bytes"
"fmt"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
"sigs.k8s.io/yaml"
"text/template"
"wdd.io/agent-common/utils"
)
type CommonEnvironmentConfig struct {
WebIP string
WebPort string
HarborIP string
HarborPort string
}
type CmiiBackendDeploymentConfig struct {
Namespace string
AppName string
ImageTag string
TagVersion string
Replicas string
NodePort string
NeedPvcCache bool
CustomJvmOpt string
}
type CmiiFrontendDeploymentConfig struct {
Namespace string
AppName string
ImageTag string
TagVersion string
Replicas string
ShortName string
}
func (backend CmiiBackendDeploymentConfig) ParseToApplyConf() *appsv1.DeploymentApplyConfiguration {
// Parse the deployment template
tmpl, err := template.New("cmiiBackendDeploymentTemplate").Parse(cmiiBackendDeploymentTemplate)
if err != nil {
panic(err)
}
// Execute the template with the backend config
var result bytes.Buffer
err = tmpl.Execute(&result, backend)
if err != nil {
panic(err)
}
// Unmarshal the rendered YAML into a Deployment object
deployment := v1.Deployment{}
err = yaml.Unmarshal(result.Bytes(), &deployment)
if err != nil {
panic(err)
}
utils.BeautifulPrint(&deployment)
// Render the Service template the same way
parse, err := template.New("cmiiBackendServiceTemplate").Parse(cmiiBackendServiceTemplate)
if err != nil {
panic(err)
}
// Execute the template with the backend config and print the result
var serviceResult bytes.Buffer
err = parse.Execute(&serviceResult, backend)
if err != nil {
panic(err)
}
fmt.Println(serviceResult.String())
// Unmarshal the rendered YAML into a Service object
service := corev1.Service{}
err = yaml.Unmarshal(serviceResult.Bytes(), &service)
if err != nil {
panic(err)
}
utils.BeautifulPrint(&service)
return nil
}

View File

@@ -0,0 +1,134 @@
package deploy
import (
"os"
"wdd.io/agent-common/logger"
"wdd.io/agent-operator/deploy/c_app"
"wdd.io/agent-operator/deploy/z_dep"
"wdd.io/agent-operator/image"
"wdd.io/agent-operator/real_project/zjjt"
)
var log = logger.Log
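// OctopusDeploy provisions a cluster from scratch in dependency order:
// dashboard and NFS provisioner first, then PVCs and middlewares, then
// config maps, ingress and the CMII apps. The earlier stages are commented
// out below, so only the SRS deployment currently runs.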
func OctopusDeploy() {
// common environment
common := &z_dep.CommonEnvironmentConfig{
WebIP: "10.100.2.121",
WebPort: "8888",
HarborIP: "10.100.2.121",
HarborPort: "8033",
Namespace: "zjjt",
TagVersion: "5.5.0",
TenantEnv: "",
MinioPublicIP: "10.100.2.116",
MinioInnerIP: "10.100.2.116",
NFSServerIP: "10.100.2.121",
}
//a_dashboard.K8sDashboardDeploy(common)
//
//a_nfs.NFSDeploy(common)
//a_nfs.NFSTestDeploy(common)
//
//// pvc
//b_middle.PVCDeploy(common)
//
//// middlewares
//b_middle.MidMySQlDeploy(common)
//b_middle.MidRedisDeploy(common)
//b_middle.MidEmqxDeploy(common)
//b_middle.MidMongoDeploy(common)
//b_middle.MidRabbitMQDeploy(common)
//b_middle.MidNacosDeploy(common)
//
//configMapDeploy(common)
//c_app.IngressDeploy(common)
//
//frontendImageVersionMap, backendImageVersionMap := image.FrontendBackendImageMapFromCmiiImageMap(zjjt.CmiiImageMap)
//
//backendDeploy(common, backendImageVersionMap)
//frontendDeploy(common, frontendImageVersionMap)
c_app.SRSDeploy(common)
}
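// CmiiAppDeploy renders only the application layer (backends, frontends
// and their config maps) against an already-provisioned cluster.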
func CmiiAppDeploy() {
// common environment
common := &z_dep.CommonEnvironmentConfig{
WebIP: "10.100.2.121",
WebPort: "8888",
HarborIP: "10.100.2.121",
HarborPort: "8033",
Namespace: "zjjt",
TagVersion: "5.5.0",
TenantEnv: "",
MinioPublicIP: "10.100.2.116",
MinioInnerIP: "10.100.2.116",
NFSServerIP: "10.100.2.121",
}
frontendImageVersionMap, backendImageVersionMap := image.FrontendBackendImageMapFromCmiiImageMap(zjjt.CmiiImageMap)
backendDeploy(common, backendImageVersionMap)
frontendDeploy(common, frontendImageVersionMap)
configMapDeploy(common)
//c_app.IngressDeploy(common)
}
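// backendDeploy regenerates the backend apply file: one single-replica
// Deployment per entry in the backend image-version map.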
func backendDeploy(common *z_dep.CommonEnvironmentConfig, backendImageVersionMap map[string]string) {
os.Remove(c_app.BackendApplyFilePath)
for appName, tag := range backendImageVersionMap {
c_app.DefaultCmiiBackendConfig.AppName = appName
c_app.DefaultCmiiBackendConfig.ImageTag = tag
c_app.DefaultCmiiBackendConfig.Replicas = "1"
c_app.DefaultCmiiBackendConfig.BackendDeploy(common)
}
}
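// frontendDeploy regenerates the frontend apply file: the default nginx
// deployment first, then one deployment per frontend image; apps without
// an entry in FrontendShortNameMaps are skipped with an error log.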
func frontendDeploy(common *z_dep.CommonEnvironmentConfig, frontendImageVersionMap map[string]string) {
os.Remove(c_app.FrontendApplyFilePath)
c_app.FrontendDefaultNginxDeploy(common)
for appName, tag := range frontendImageVersionMap {
c_app.DefaultCmiiFrontendConfig.AppName = appName
c_app.DefaultCmiiFrontendConfig.ImageTag = tag
c_app.DefaultCmiiFrontendConfig.Replicas = "1"
value, ok := c_app.FrontendShortNameMaps[appName]
if !ok {
log.ErrorF("FrontendShortNameMaps error ! not contains %s", appName)
continue
}
c_app.DefaultCmiiFrontendConfig.ShortName = value
c_app.DefaultCmiiFrontendConfig.FrontendDeploy(common)
}
}
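// configMapDeploy renders one ConfigMap per frontend, injecting the short
// name and client id looked up from the static maps; apps without a
// client id are skipped with an error log.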
func configMapDeploy(common *z_dep.CommonEnvironmentConfig) {
os.Remove(c_app.ConfigMapApplyFilePath)
for frontendName, shortName := range c_app.FrontendShortNameMaps {
c_app.DefaultCmiiFrontendConfig.AppName = frontendName
c_app.DefaultCmiiFrontendConfig.ShortName = shortName
value, ok := c_app.FrontendClientIdMaps[frontendName]
if !ok {
log.ErrorF("FrontendClientIdMaps error ! not contains %s", frontendName)
continue
}
c_app.DefaultCmiiFrontendConfig.ClientId = value
c_app.DefaultCmiiFrontendConfig.ConfigMapDeploy(common)
}
}

View File

@@ -0,0 +1,13 @@
package deploy
import "testing"
func TestOctopusDeploy(t *testing.T) {
OctopusDeploy()
}
func TestCmiiAppDeploy(t *testing.T) {
CmiiAppDeploy()
}

View File

@@ -0,0 +1,22 @@
package a_dashboard
import (
"wdd.io/agent-common/logger"
"wdd.io/agent-operator/deploy/z_dep"
)
var (
K8sDashboardApplyFilePath = ""
log = logger.Log
)
func init() {
K8sDashboardApplyFilePath = z_dep.ApplyFilePrefix + "k8s-dashboard.yaml"
log.InfoF("K8sDashboardApplyFilePath: %s", K8sDashboardApplyFilePath)
}
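// K8sDashboardDeploy renders the dashboard manifest with the common
// environment and writes it to K8sDashboardApplyFilePath.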
func K8sDashboardDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
return commonEnv.ParseCommonEnvToApplyFile(CmiiK8sDashboardTemplate, K8sDashboardApplyFilePath)
}

View File

@@ -0,0 +1,310 @@
package a_dashboard
const CmiiK8sDashboardTemplate = `
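# Kubernetes Dashboard v2.0.1 plus its metrics scraper, exposed on NodePort 30554;
# the admin-user ServiceAccount at the bottom is bound to cluster-admin.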
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 30554
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/dashboard:v2.0.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
`

View File

@@ -0,0 +1,33 @@
package a_nfs
import (
"wdd.io/agent-common/logger"
"wdd.io/agent-operator/deploy/z_dep"
)
var (
NfsApplyFilePath = ""
NfsTestApplyFilePath = ""
log = logger.Log
)
type NfsDeployConfig struct {
NfsLocalPath string
}
func init() {
NfsApplyFilePath = z_dep.ApplyFilePrefix + "k8s-nfs.yaml"
NfsTestApplyFilePath = z_dep.ApplyFilePrefix + "k8s-nfs-test.yaml"
log.InfoF("NfsApplyFilePath : %s\n", NfsApplyFilePath)
log.InfoF("NfsTestApplyFilePath : %s\n", NfsTestApplyFilePath)
}
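// NFSDeploy renders the NFS dynamic-provisioner manifest; NFSTestDeploy
// renders a PVC/Pod pair that smoke-tests dynamic provisioning.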
func NFSDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
return commonEnv.ParseCommonEnvToApplyFile(CmiiNfsTemplate, NfsApplyFilePath)
}
func NFSTestDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
return commonEnv.ParseCommonEnvToApplyFile(CmiiNFSTestTemplate, NfsTestApplyFilePath)
}

View File

@@ -0,0 +1,115 @@
package a_nfs
const CmiiNfsTemplate = `
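# NFS dynamic provisioner: ServiceAccount and RBAC (currently bound to
# cluster-admin), the nfs-prod-distribute StorageClass, and the provisioner
# Deployment pointing at the NFS server configured below.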
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # set the namespace for your environment; the same applies below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # must match the namespace of the RBAC objects above
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: {{ .NFSServerIP }}
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: {{ .NFSServerIP }}
path: /var/lib/docker/nfs_data
`

View File

@@ -0,0 +1,39 @@
package a_nfs
const CmiiNFSTestTemplate = `
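# Smoke test for the NFS StorageClass: a 1Mi PVC plus a busybox pod that
# writes a marker file to the mounted volume and exits.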
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name of the StorageClass
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
containers:
- name: test-pod
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/busybox
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: test-claim # must match the PVC name above
`

View File

@@ -0,0 +1,266 @@
package b_middle
const CmiiEmqxTemplate = `
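# Single-node EMQX cluster with Kubernetes peer discovery; auth users and
# loaded plugins are injected through the ConfigMaps below, and MQTT,
# websocket and dashboard ports are exposed via NodePort.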
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: {{ .Namespace }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: {{ .Namespace }}
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
data:
EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc.cluster.local:443
EMQX_NAME: helm-emqxs
EMQX_CLUSTER__DISCOVERY: k8s
EMQX_CLUSTER__K8S__APP_NAME: helm-emqxs
EMQX_CLUSTER__K8S__SERVICE_NAME: helm-emqxs-headless
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
EMQX_CLUSTER__K8S__NAMESPACE: {{ .Namespace }}
EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: {{ .Namespace }}
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
data:
emqx_auth_username.conf: |-
auth.user.1.username = cmlc
auth.user.1.password = odD8#Ve7.B
auth.user.password_hash = sha256
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
{emqx_auth_username,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: {{ .Namespace }}
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
spec:
affinity: {}
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/emqx:5.5.1
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: {}
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf"
subPath: emqx_auth_username.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_username.conf
path: emqx_auth_username.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: {{ .Namespace }}
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: {{ .Namespace }}
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: {{ .Namespace }}
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: {{ .Namespace }}
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: {{ .Namespace }}
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
`

View File

@@ -0,0 +1,78 @@
package b_middle
const CmiiMongoTemplate = `
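# Standalone MongoDB 5.0 StatefulSet; root credentials are set via env and
# data is persisted through the helm-mongo PVC.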
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: {{ .Namespace }}
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
spec:
type: ClusterIP
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: {{ .Namespace }}
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: {}
containers:
- name: helm-mongo
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/mongo:5.0
resources: {}
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
`

View File

@@ -0,0 +1,411 @@
package b_middle
const CmiiMySQLTemplate = `
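# Single-primary Bitnami MySQL 8 StatefulSet with a tuned my.cnf, bootstrap
# user grants, ClusterIP/headless/NodePort services and hostPath storage.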
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
annotations:
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
max_allowed_packet=32M
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all on *.* to zyly_qc@'%';
create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
grant all on *.* to k8s_admin@'%';
create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all on *.* to audit_dba@'%';
create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
create user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION CLIENT on *.* to monitor@'%';
flush privileges;
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: {{ .Namespace }}
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: {{ .Namespace }}
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations:
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: {{ .Namespace }}
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations:
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: {{ .Namespace }}
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: {{ .Namespace }}
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
serviceAccountName: helm-mysql
affinity: {}
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: {}
requests: {}
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv
`

View File

@@ -0,0 +1,129 @@
package b_middle
const CmiiNacosTemplate = `
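# Standalone Nacos server backed by the cmii_nacos_config database in
# helm-mysql; the console and gRPC ports are exposed via NodePort 38989+.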
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: {{ .Namespace }}
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: {{ .TagVersion }}
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: {{ .Namespace }}
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: {{ .TagVersion }}
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38989
- port: 9848
name: server12
targetPort: 9848
nodePort: 38912
- port: 9849
name: server23
targetPort: 9849
nodePort: 38923
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: {{ .Namespace }}
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: {{ .TagVersion }}
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: {{ .TagVersion }}
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: {}
containers:
- name: nacos-server
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
`

View File

@@ -0,0 +1,330 @@
package b_middle
const CmiiRabbitMQTemplate = `
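# Single-node RabbitMQ with the management dashboard; credentials live in
# the Secret and rabbitmq.conf, and AMQP (35672) plus the dashboard (35675)
# are exposed via NodePort.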
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
targetPort: dashboard
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: {{ .Namespace }}
publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 35675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: {{ .Namespace }}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: {{ .Namespace }}
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
serviceAccountName: helm-rabbitmq
affinity: {}
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/bitnami-shell:10-debian-10-r140
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: {}
requests: {}
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: {}
requests: {}
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq
`

View File

@@ -0,0 +1,585 @@
package b_middle
const CmiiRedisTemplate = `
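# Redis master/replica pair using the Bitnami start scripts and health-check
# probes; note that redis-data is an emptyDir, so data does not survive pod
# rescheduling.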
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: {{ .Namespace }}
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: {{ .Namespace }}
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: {{ .Namespace }}
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: {}
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "100m"
memory: 1Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: tmp
emptyDir: {}
- name: redis-data
emptyDir: {}
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: {{ .Namespace }}
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.{{ .Namespace }}.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "100m"
memory: 1Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: redis-data
emptyDir: {}
`

View File

@@ -0,0 +1,62 @@
package b_middle

import (
	"wdd.io/agent-common/logger"
	"wdd.io/agent-operator/deploy/z_dep"
)

var (
	EmqxApplyFilePath     = ""
	MongoApplyFilePath    = ""
	RabbitMQApplyFilePath = ""
	RedisApplyFilePath    = ""
	MySQLApplyFilePath    = ""
	NacosApplyFilePath    = ""
	PVCApplyFilePath      = ""
	log                   = logger.Log
)

func init() {
	EmqxApplyFilePath = z_dep.ApplyFilePrefix + "k8s-emqx.yaml"
	MongoApplyFilePath = z_dep.ApplyFilePrefix + "k8s-mongo.yaml"
	RabbitMQApplyFilePath = z_dep.ApplyFilePrefix + "k8s-rabbitmq.yaml"
	RedisApplyFilePath = z_dep.ApplyFilePrefix + "k8s-redis.yaml"
	MySQLApplyFilePath = z_dep.ApplyFilePrefix + "k8s-mysql.yaml"
	NacosApplyFilePath = z_dep.ApplyFilePrefix + "k8s-nacos.yaml"
	PVCApplyFilePath = z_dep.ApplyFilePrefix + "k8s-pvc.yaml"
	log.DebugF("EmqxApplyFilePath: %s", EmqxApplyFilePath)
	log.DebugF("MongoApplyFilePath: %s", MongoApplyFilePath)
	log.DebugF("RabbitMQApplyFilePath: %s", RabbitMQApplyFilePath)
	log.DebugF("RedisApplyFilePath: %s", RedisApplyFilePath)
	log.DebugF("MySQLApplyFilePath: %s", MySQLApplyFilePath)
	log.DebugF("NacosApplyFilePath: %s", NacosApplyFilePath)
	log.DebugF("PVCApplyFilePath: %s", PVCApplyFilePath)
}

func MidEmqxDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
	return commonEnv.ParseCommonEnvToApplyFile(CmiiEmqxTemplate, EmqxApplyFilePath)
}

func MidMongoDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
	return commonEnv.ParseCommonEnvToApplyFile(CmiiMongoTemplate, MongoApplyFilePath)
}

func MidRabbitMQDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
	return commonEnv.ParseCommonEnvToApplyFile(CmiiRabbitMQTemplate, RabbitMQApplyFilePath)
}

func MidRedisDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
	return commonEnv.ParseCommonEnvToApplyFile(CmiiRedisTemplate, RedisApplyFilePath)
}

func MidMySQlDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
	return commonEnv.ParseCommonEnvToApplyFile(CmiiMySQLTemplate, MySQLApplyFilePath)
}

func MidNacosDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
	return commonEnv.ParseCommonEnvToApplyFile(CmiiNacosTemplate, NacosApplyFilePath)
}

func PVCDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
	return commonEnv.ParseCommonEnvToApplyFile(CmiiPVCTemplate, PVCApplyFilePath)
}
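
// Illustrative sketch only (not part of this change): the wrappers above
// could be chained into a single driver that renders every middleware
// manifest in order, assuming a fully populated *z_dep.CommonEnvironmentConfig.
// DeployAllMiddleware is a hypothetical name.
func DeployAllMiddleware(commonEnv *z_dep.CommonEnvironmentConfig) bool {
	steps := []func(*z_dep.CommonEnvironmentConfig) bool{
		PVCDeploy,
		MidMySQlDeploy,
		MidRedisDeploy,
		MidMongoDeploy,
		MidRabbitMQDeploy,
		MidEmqxDeploy,
		MidNacosDeploy,
	}
	for _, step := range steps {
		if !step(commonEnv) {
			// stop on the first template that fails to render
			return false
		}
	}
	return true
}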

View File

@@ -0,0 +1,79 @@
package b_middle
const CmiiPVCTemplate = `
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: {{ .Namespace }}
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: {{ .TagVersion }}
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: {{ .Namespace }}
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: {{ .TagVersion }}
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: {{ .Namespace }}
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: {{ .TagVersion }}
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: {{ .Namespace }}
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: {{ .TagVersion }}
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
`

View File

@@ -0,0 +1,143 @@
package c_app

import (
	"github.com/go-playground/validator/v10"
	"os"
	"wdd.io/agent-common/logger"
	"wdd.io/agent-operator/deploy/z_dep"
)

var log = logger.Log

type CmiiBackendConfig struct {
	z_dep.CommonEnvironmentConfig
	AppName      string `json:"app_name,omitempty" validate:"required"`
	ImageTag     string `json:"image_tag,omitempty" validate:"required"`
	Replicas     string `json:"replicas,omitempty" validate:"required" default:"1"`
	NodePort     string `json:"node_port,omitempty"`
	NeedPvcCache bool   `json:"need_pvc_cache,omitempty"`
	CustomJvmOpt string `json:"custom_jvm_opt,omitempty"`
}

type CmiiFrontendConfig struct {
	z_dep.CommonEnvironmentConfig
	AppName   string `json:"app_name,omitempty" validate:"required"`
	ImageTag  string `json:"image_tag,omitempty" validate:"required"`
	Replicas  string `json:"replicas,omitempty" validate:"required" default:"1"`
	ShortName string `json:"short_name,omitempty" validate:"required"`
	ClientId  string
}

var (
	DefaultCmiiBackendConfig  = &CmiiBackendConfig{}
	DefaultCmiiFrontendConfig = &CmiiFrontendConfig{}
	BackendApplyFilePath      = ""
	FrontendApplyFilePath     = ""
	SRSApplyFilePath          = ""
	IngresApplyFilePath       = ""
	ConfigMapApplyFilePath    = ""
)

func init() {
	BackendApplyFilePath = z_dep.ApplyFilePrefix + "k8s-backend.yaml"
	FrontendApplyFilePath = z_dep.ApplyFilePrefix + "k8s-frontend.yaml"
	SRSApplyFilePath = z_dep.ApplyFilePrefix + "k8s-srs.yaml"
	IngresApplyFilePath = z_dep.ApplyFilePrefix + "k8s-ingress.yaml"
	ConfigMapApplyFilePath = z_dep.ApplyFilePrefix + "k8s-configmap.yaml"
	log.DebugF("backend apply file path: %s\n", BackendApplyFilePath)
	log.DebugF("frontend apply file path: %s\n", FrontendApplyFilePath)
	log.DebugF("srs apply file path: %s\n", SRSApplyFilePath)
	log.DebugF("ingress apply file path: %s\n", IngresApplyFilePath)
	log.DebugF("config map apply file path: %s\n", ConfigMapApplyFilePath)
}

func (backend *CmiiBackendConfig) BackendDeploy(common *z_dep.CommonEnvironmentConfig) bool {
	// copy the shared environment fields into the backend config
	z_dep.CopySameFields(common, backend)
	validate := validator.New()
	err := validate.Struct(backend)
	if err != nil {
		log.ErrorF("backend config validate error: %v\n", err)
		return false
	}
	if !z_dep.ParseEnvToApplyFile(backend, CmiiBackendDeploymentTemplate, BackendApplyFilePath) {
		return false
	}
	if !z_dep.ParseEnvToApplyFile(backend, CmiiBackendServiceTemplate, BackendApplyFilePath) {
		return false
	}
	// optional cache PVC
	if backend.NeedPvcCache {
		if !z_dep.ParseEnvToApplyFile(backend, CmiiBackendPVCTemplate, BackendApplyFilePath) {
			return false
		}
	}
	return true
}

func (frontend *CmiiFrontendConfig) FrontendDeploy(common *z_dep.CommonEnvironmentConfig) bool {
	// copy the shared environment fields into the frontend config
	z_dep.CopySameFields(common, frontend)
	validate := validator.New()
	err := validate.Struct(frontend)
	if err != nil {
		log.ErrorF("frontend config validate error: %v\n", err)
		return false
	}
	if !z_dep.ParseEnvToApplyFile(frontend, CmiiFrontendDeploymentTemplate, FrontendApplyFilePath) {
		return false
	}
	if !z_dep.ParseEnvToApplyFile(frontend, CmiiFrontendServiceTemplate, FrontendApplyFilePath) {
		return false
	}
	return true
}

func (frontend *CmiiFrontendConfig) ConfigMapDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
	// copy the shared environment fields into the frontend config
	z_dep.CopySameFields(commonEnv, frontend)
	// manual validation
	if frontend.ShortName == "" || frontend.ClientId == "" {
		log.ErrorF("short name or client id is empty!")
		return false
	}
	return z_dep.ParseEnvToApplyFile(frontend, CmiiFrontendConfigMapTemplate, ConfigMapApplyFilePath)
}

func IngressDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
	if !commonEnv.ParseCommonEnvToApplyFile(CmiiFrontendIngressTemplate, IngresApplyFilePath) {
		return false
	}
	if !commonEnv.ParseCommonEnvToApplyFile(CmiiBackendIngressTemplate, IngresApplyFilePath) {
		return false
	}
	if !commonEnv.ParseCommonEnvToApplyFile(CmiiGatewayIngressTemplate, IngresApplyFilePath) {
		return false
	}
	return true
}

func SRSDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
	// the SRS apply file is rebuilt from scratch on every run
	_ = os.Remove(SRSApplyFilePath)
	return commonEnv.ParseCommonEnvToApplyFile(CmiiSrsTemplate, SRSApplyFilePath)
}

func FrontendDefaultNginxDeploy(commonEnv *z_dep.CommonEnvironmentConfig) bool {
	return commonEnv.ParseCommonEnvToApplyFile(CmiiFrontendDefaultNginxConfTemplate, FrontendApplyFilePath)
}
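
// Illustrative sketch only (not part of this change): a typical call into
// BackendDeploy, assuming commonEnv has already passed ValidateAndUniform so
// the embedded required fields are filled in by CopySameFields.
// deployGatewayExample is a hypothetical name.
func deployGatewayExample(commonEnv *z_dep.CommonEnvironmentConfig) bool {
	backend := &CmiiBackendConfig{
		AppName:  "cmii-uav-gateway",
		ImageTag: "5.2.0-123",
		Replicas: "2",
	}
	// appends the Deployment and Service manifests to BackendApplyFilePath
	return backend.BackendDeploy(commonEnv)
}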

View File

@@ -1,19 +1,18 @@
package deploy
package c_app
import "testing"
import (
"testing"
)
func TestCmiiBackendDeploymentConfig_ParseToApplyConf(t *testing.T) {
deploymentConfig := CmiiBackendDeploymentConfig{
deploymentConfig := CmiiBackendConfig{
Namespace: "uavcloud-dev",
AppName: "cmii-uav-gateway",
ImageTag: "5.2.0-123",
TagVersion: "5.2.0",
Replicas: "2",
NodePort: "31213",
NeedPvcCache: true,
}
deploymentConfig.ParseToApplyConf()
}

View File

@@ -0,0 +1,57 @@
package c_app

var FrontendShortNameMaps = map[string]string{
	"cmii-suav-platform-supervision":     "supervision",
	"cmii-suav-platform-supervisionh5":   "supervisionh5",
	"cmii-uav-platform":                  "platform",
	"cmii-uav-platform-ai-brain":         "ai-brain",
	"cmii-uav-platform-armypeople":       "armypeople",
	"cmii-uav-platform-base":             "base",
	"cmii-uav-platform-cms-portal":       "cmsportal",
	"cmii-uav-platform-detection":        "detection",
	"cmii-uav-platform-emergency-rescue": "emergency",
	"cmii-uav-platform-logistics":        "logistics",
	"cmii-uav-platform-media":            "media",
	"cmii-uav-platform-multiterminal":    "multiterminal",
	"cmii-uav-platform-mws":              "mws",
	"cmii-uav-platform-oms":              "oms",
	"cmii-uav-platform-open":             "open",
	"cmii-uav-platform-security":         "security",
	"cmii-uav-platform-securityh5":       "securityh5",
	"cmii-uav-platform-seniclive":        "seniclive",
	"cmii-uav-platform-share":            "share",
	"cmii-uav-platform-splice":           "splice",
	"cmii-uav-platform-traffic":          "traffic",
	"cmii-uav-platform-threedsimulation": "threedsimulation",
	"cmii-uav-platform-jiangsuwenlv":     "jiangsuwenlv",
	"cmii-uav-platform-qinghaitourism":   "qinghaitourism",
}

var FrontendClientIdMaps = map[string]string{
	"cmii-suav-platform-supervision":     "APP_qqSu82THfexI8PLM",
	"cmii-suav-platform-supervisionh5":   "APP_qqSu82THfexI8PLM",
	"cmii-uav-platform":                  "empty",
	"cmii-uav-platform-ai-brain":         "APP_rafnuCAmBESIVYMH",
	"cmii-uav-platform-armypeople":       "APP_UIegse6Lfou9pO1U",
	"cmii-uav-platform-base":             "APP_9LY41OaKSqk2btY0",
	"cmii-uav-platform-cms-portal":       "empty",
	"cmii-uav-platform-detection":        "APP_FDHW2VLVDWPnnOCy",
	"cmii-uav-platform-emergency-rescue": "APP_aGsTAY1uMZrpKdfk",
	"cmii-uav-platform-logistics":        "APP_PvdfRRRBPL8xbIwl",
	"cmii-uav-platform-media":            "APP_4AU8lbifESQO4FD6",
	"cmii-uav-platform-multiterminal":    "APP_PvdfRRRBPL8xbIwl",
	"cmii-uav-platform-mws":              "APP_uKniXPELlRERBBwK",
	"cmii-uav-platform-oms":              "empty",
	"cmii-uav-platform-open":             "empty",
	"cmii-uav-platform-qingdao":          "empty",
	"cmii-uav-platform-qinghaitourism":   "empty",
	"cmii-uav-platform-security":         "APP_JUSEMc7afyWXxvE7",
	"cmii-uav-platform-securityh5":       "APP_N3ImO0Ubfu9peRHD",
	"cmii-uav-platform-seniclive":        "empty",
	"cmii-uav-platform-share":            "APP_4lVSVI0ZGxTssir8",
	"cmii-uav-platform-splice":           "APP_zE0M3sTRXrCIJS8Y",
	"cmii-uav-platform-threedsimulation": "empty",
	"cmii-uav-platform-visualization":    "empty",
	"cmii-uav-platform-traffic":          "APP_Jc8i2wOQ1t73QEJS",
	"cmii-uav-platform-jiangsuwenlv":     "empty",
}
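
// Illustrative sketch only (not part of this change): how the two lookup
// tables above might feed a CmiiFrontendConfig. newFrontendConfigExample is
// a hypothetical helper; appName is assumed to be a key present in both maps.
func newFrontendConfigExample(appName, imageTag string) (*CmiiFrontendConfig, bool) {
	shortName, ok := FrontendShortNameMaps[appName]
	if !ok {
		return nil, false
	}
	clientId, ok := FrontendClientIdMaps[appName]
	if !ok {
		return nil, false
	}
	return &CmiiFrontendConfig{
		AppName:   appName,
		ImageTag:  imageTag,
		Replicas:  "1",
		ShortName: shortName,
		ClientId:  clientId,
	}, true
}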

View File

@@ -1,6 +1,6 @@
package deploy
package c_app
const cmiiBackendDeploymentTemplate = `
const CmiiBackendDeploymentTemplate = `
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -10,7 +10,7 @@ metadata:
cmii.type: backend
cmii.app: {{ .AppName }}
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus/control
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: {{ .TagVersion }}
spec:
replicas: {{ .Replicas }}
@@ -40,7 +40,7 @@ spec:
- name: harborsecret
containers:
- name: {{ .AppName }}
image: "harbor.cdcyy.com.cn/cmii/{{ .AppName }}:{{ .ImageTag }}"
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/{{ .AppName }}:{{ .ImageTag }}
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
@@ -48,7 +48,7 @@ spec:
- name: APPLICATION_NAME
value: {{ .AppName }}
- name: CUST_JAVA_OPTS
value: "-Xms500m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
- name: NACOS_REGISTRY
value: "helm-nacos:8848"
- name: NACOS_DISCOVERY_IP
@@ -74,9 +74,9 @@ spec:
resources:
limits:
memory: 2Gi
cpu: 2
cpu: "2"
requests:
memory: 1Gi
memory: 200Mi
cpu: 200m
livenessProbe:
httpGet:
@@ -109,7 +109,7 @@ spec:
successThreshold: 1
failureThreshold: 5
volumeMounts:
- name: glusterfs-backend-log-volume
- name: nfs-backend-log-volume
mountPath: /cmii/logs
readOnly: false
subPath: {{ .Namespace }}/{{ .AppName }}
@@ -120,17 +120,17 @@ spec:
subPath: {{ .Namespace }}/{{ .AppName }}
{{- end }}
volumes:
- name: glusterfs-backend-log-volume
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: glusterfs-backend-log-pvc
claimName: nfs-backend-log-pvc
{{- if .NeedPvcCache }}
- name: data-cache-volume
persistentVolumeClaim:
claimName: {{ .AppName }}-cache
{{- end }}
`
`
const cmiiBackendServiceTemplate = `
const CmiiBackendServiceTemplate = `
apiVersion: v1
kind: Service
metadata:
@@ -140,7 +140,7 @@ metadata:
cmii.type: backend
cmii.app: {{ .AppName }}
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus/control
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: {{ .TagVersion }}
spec:
{{- if .NodePort }}
@@ -159,8 +159,9 @@ spec:
{{- if .NodePort }}
nodePort: {{ .NodePort }}
{{- end }}
`
const cmiiBackendPVCTemplate = `
`
const CmiiBackendPVCTemplate = `
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
@@ -170,7 +171,7 @@ metadata:
cmii.type: backend
cmii.app: {{ .AppName }}
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus/control
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: {{ .TagVersion }}
spec:
storageClassName: nfs-prod-distribute
@@ -180,84 +181,4 @@ spec:
resources:
requests:
storage: 15Gi
`
const cmiiFrontendDeploymentTemplate = `
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .AppName }}
namespace: {{ .Namespace }}
labels:
cmii.type: frontend
cmii.app: {{ .AppName }}
octopus/control: frontend-app-1.0.0
app.kubernetes.io/managed-by: octopus/control
app.kubernetes.io/app-version: {{ .TagVersion }}
spec:
replicas: {{ .Replicas }}
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: {{ .AppName }}
template:
metadata:
labels:
cmii.type: frontend
cmii.app: {{ .AppName }}
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: {{ .AppName }}
image: "harbor.cdcyy.com.cn/cmii/{{ .AppName }}:{{ .ImageTag }}"
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: {{ .Namespace }}
- name: APPLICATION_NAME
value: {{ .AppName }}
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 500m
memory: 500Mi
volumeMounts:
- name: nginx-conf
mountPath: /usr/local/nginx/conf/nginx.conf
subPath: nginx.conf
- name: default-nginx-conf
mountPath: /etc/nginx/conf.d/default.conf
subPath: default.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: default-nginx-conf
configMap:
name: default-nginx-cm
items:
- key: default.conf
path: default.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-{{ .ShortName }}
items:
- key: ingress-config.js
path: ingress-config.js
`

View File

@@ -0,0 +1,94 @@
package c_app
const CmiiFrontendDeploymentTemplate = `
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .AppName }}
namespace: {{ .Namespace }}
labels:
cmii.type: frontend
cmii.app: {{ .AppName }}
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: {{ .TagVersion }}
spec:
replicas: {{ .Replicas }}
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: {{ .AppName }}
template:
metadata:
labels:
cmii.type: frontend
cmii.app: {{ .AppName }}
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: {{ .AppName }}
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/{{ .AppName }}:{{ .ImageTag }}
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: {{ .Namespace }}
- name: APPLICATION_NAME
value: {{ .AppName }}
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 500m
memory: 500Mi
volumeMounts:
- name: nginx-conf
mountPath: /usr/local/nginx/conf/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-{{ .ShortName }}
items:
- key: ingress-config.js
path: ingress-config.js
`
const CmiiFrontendServiceTemplate = `
apiVersion: v1
kind: Service
metadata:
name: {{ .AppName }}
namespace: {{ .Namespace }}
labels:
cmii.type: frontend
cmii.app: {{ .AppName }}
octopus.control: frontend-app-wdd
app.kubernetes.io/version: {{ .TagVersion }}
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: {{ .AppName }}
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528
`

View File

@@ -0,0 +1,502 @@
package c_app
const CmiiSrsTemplate = `
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: {{ .Namespace }}
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 30935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://{{ .WebIP }}:{{ .WebPort }};
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: {{ .Namespace }}
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
nodePort: 30935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30557
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: {{ .Namespace }}
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: {{ .Namespace }}
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: {{ .Namespace }}
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
creationTimestamp: null
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 30935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: {{ .WebIP }}
resources:
limits:
cpu: 1200m
memory: 6Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: {{ .Namespace }}/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: {{ .Namespace }}/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/cmii-srs-oss-adaptor:2023-SA
env:
- name: OSS_ENDPOINT
value: 'http://{{ .MinioInnerIP }}:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 1200m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: {{ .Namespace }}/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: {{ .Namespace }}
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
creationTimestamp: null
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: {{ .HarborIP }}:{{ .HarborPort }}/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/ping
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/ping
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: {{ .Namespace }}
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: {{ .Namespace }}
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: {{ .Namespace }}
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: {{ .TagVersion }}
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: developer
password: N@cos14Good
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: {{ .TagVersion }}
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: {{ .TagVersion }}
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://{{ .WebIP }}:30935'
rtsp: 'rtsp://{{ .WebIP }}:30554'
srt: 'srt://{{ .WebIP }}:30556'
flv: 'http://{{ .WebIP }}:30500'
hls: 'http://{{ .WebIP }}:30500'
rtc: 'webrtc://{{ .WebIP }}:30557'
replay: 'https://{{ .WebIP }}:30333'
minio:
endpoint: http://{{ .MinioInnerIP }}:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls
`

View File

@@ -0,0 +1,609 @@
package c_app
const CmiiFrontendConfigMapTemplate = `
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-{{ .ShortName }}
namespace: {{ .Namespace }}
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "{{ .TenantEnv }}",
CloudHOST: "{{ .WebIP }}:{{ .WebPort }}",
ApplicationShortName: "{{ .ShortName }}",
AppClientId: "{{ .ClientId }}"
}
`
const CmiiFrontendDefaultNginxConfTemplate = `
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-cm
namespace: {{ .Namespace }}
labels:
cmii.type: frontend
data:
nginx.conf: |
user root;
worker_processes auto;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 600;
server {
listen 9528;
server_name localhost;
gzip on;
location / {
root /home/cmii-platform/dist;
index index.html index.htm;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
}
`
const CmiiFrontendIngressTemplate = `
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: {{ .Namespace }}
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/green)$ $1/ redirect;
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/inspection)$ $1/ redirect;
rewrite ^(/park)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/cms)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/hyper)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/mws-admin)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/splice-visual)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
rewrite ^(/communication)$ $1/ redirect;
rewrite ^(/infrastructure)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
spec:
rules:
- host: fake-domain.{{ .Namespace }}.io
http:
paths:
- path: /inspection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /green/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /park/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /emersupport/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /infrastructure/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /cms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms
servicePort: 9528
- path: /cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /hyper/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hyperspectral
servicePort: 9528
- path: /logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /mws-admin/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws-admin
servicePort: 9528
- path: /oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /splice-visual/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice-visual
servicePort: 9528
- path: /traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
- path: /communication/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /fireRescue/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
`
const CmiiBackendIngressTemplate = `
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: {{ .Namespace }}
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: cmii-admin-data.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-data
servicePort: 8080
- host: cmii-admin-gateway.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- host: cmii-admin-user.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-user
servicePort: 8080
- host: cmii-open-gateway.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- host: cmii-uav-airspace.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-airspace
servicePort: 8080
- host: cmii-uav-brain.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-brain
servicePort: 8080
- host: cmii-uav-clusters.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-clusters
servicePort: 8080
- host: cmii-uav-cms.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cms
servicePort: 8080
- host: cmii-uav-data-post-process.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-data-post-process
servicePort: 8080
- host: cmii-uav-developer.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-developer
servicePort: 8080
- host: cmii-uav-device.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-device
servicePort: 8080
- host: cmii-uav-gateway.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-industrial-portfolio
servicePort: 8080
- host: cmii-uav-kpi-monitor.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-kpi-monitor
servicePort: 8080
- host: cmii-uav-live.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-live
servicePort: 8080
- host: cmii-uav-cloud-live.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cloud-live
servicePort: 8080
- host: cmii-uav-logger.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-logger
servicePort: 8080
- host: cmii-uav-material-warehouse.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-material-warehouse
servicePort: 8080
- host: cmii-uav-mission.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mission
servicePort: 8080
- host: cmii-uav-monitor.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-monitor
servicePort: 8080
- host: cmii-uav-mqtthandler.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mqtthandler
servicePort: 8080
- host: cmii-uav-notice.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-notice
servicePort: 8080
- host: cmii-uav-oauth.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-oauth
servicePort: 8080
- host: cmii-uav-process.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-security-system.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-security-system
servicePort: 8080
- host: cmii-uav-surveillance.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-surveillance
servicePort: 8080
- host: cmii-uav-user.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-user
servicePort: 8080
- host: cmii-uav-waypoint.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-waypoint
servicePort: 8080
- host: cmii-uav-alarm.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-alarm
servicePort: 8080
- host: cmii-uav-emergency.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-emergency
servicePort: 8080
- host: cmii-uav-integration.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-integration
servicePort: 8080
- host: cmii-suav-supervision.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-supervision
servicePort: 8080
- host: cmii-uav-gis-server.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gis-server
servicePort: 8080
- host: cmii-uav-grid-datasource.uavcloud-{{ .Namespace }}.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-datasource
servicePort: 8080
`
const CmiiGatewayIngressTemplate = `
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: {{ .Namespace }}
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: {{ .TagVersion }}
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
spec:
rules:
- host: fake-domain.{{ .Namespace }}.io
http:
paths:
- path: /oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
`

View File

@@ -0,0 +1,119 @@
package z_dep

import (
	"bytes"
	"fmt"
	"github.com/go-playground/validator/v10"
	"reflect"
	"runtime"
	"text/template"
	"wdd.io/agent-common/assert"
	"wdd.io/agent-common/logger"
	"wdd.io/agent-common/utils"
)

var ApplyFilePrefix = ""
var Asserter = assert.Asserter
var log = logger.Log

func init() {
	// NOTE: only the Windows branch points at a real path; the linux/darwin
	// values read as unfinished placeholders.
	switch runtime.GOOS {
	case "linux":
		ApplyFilePrefix = "Linux value"
	case "darwin": // macOS
		ApplyFilePrefix = "MacOS value"
	case "windows":
		ApplyFilePrefix = "C:\\Users\\wddsh\\Documents\\IdeaProjects\\ProjectOctopus\\agent-operator\\deploy\\z_file\\"
	default:
		ApplyFilePrefix = "Unknown OS"
	}
	fmt.Printf("ApplyFilePrefix: %s\n", ApplyFilePrefix)
}

type CommonEnvironmentConfig struct {
	WebIP         string `json:"web_ip,omitempty" validate:"required"`      // A1C1IP
	WebPort       string `json:"web_port,omitempty" validate:"required"`    // A1C1JS
	HarborIP      string `json:"harbor_ip,omitempty" validate:"required"`   // A1C2IP
	HarborPort    string `json:"harbor_port,omitempty" validate:"required"` // default 8033
	Namespace     string `json:"namespace,omitempty" validate:"required"`   // SUPREME
	TagVersion    string `json:"tag_version,omitempty" validate:"required"` // KIMMY
	TenantEnv     string `json:"tenant_env,omitempty"`                      // TENANT_ENV, internal use only
	MinioPublicIP string `json:"minio_public_ip,omitempty"`                 // M2C1IP
	MinioInnerIP  string `json:"minio_inner_ip,omitempty"`                  // M2D2IP
	NFSServerIP   string `json:"nfs_server_ip,omitempty"`                   // N1C2IP
}

//func (env *CommonEnvironmentConfig) CompactEnv() {
//
//	copySameFields(env, c_app.DefaultCmiiBackendConfig)
//	copySameFields(env, c_app.DefaultCmiiFrontendConfig)
//}
//
//

func (env *CommonEnvironmentConfig) ValidateAndUniform() bool {
	validate := validator.New()
	err := validate.Struct(env)
	if err != nil {
		fmt.Printf("common environment config validate error: %v\n", err)
		return false
	}
	// uniform all
	if env.MinioInnerIP == "" {
		env.MinioInnerIP = env.HarborIP
	}
	if env.MinioPublicIP == "" {
		env.MinioPublicIP = env.WebIP
	}
	return true
}

func (env *CommonEnvironmentConfig) ParseCommonEnvToApplyFile(applyTemplate string, applyFilePath string) bool {
	return ParseEnvToApplyFile(env, applyTemplate, applyFilePath)
}

func ParseEnvToApplyFile(environment any, applyTemplate string, applyFilePath string) bool {
	randomString := utils.GenerateRandomString(8)
	// parse the manifest template
	tmpl, err := template.New(randomString).Parse(applyTemplate)
	if err != nil {
		log.ErrorF("parse template error: %v", err)
		return false
	}
	// execute the template with the given data and buffer the result
	var result bytes.Buffer
	err = tmpl.Execute(&result, environment)
	if err != nil {
		log.ErrorF("template execute error: %v", err)
		return false
	}
	// append to file
	if !utils.AppendContentWithSplitLineToFile(result.String(), applyFilePath) {
		return false
	}
	return true
}

// CopySameFields uses reflection to copy the value of every field in a into
// the identically named field of b, when such a field exists.
func CopySameFields(a, b interface{}) {
	va := reflect.ValueOf(a).Elem()
	vb := reflect.ValueOf(b).Elem()
	for i := 0; i < va.NumField(); i++ {
		fieldName := va.Type().Field(i).Name
		if vb.FieldByName(fieldName).IsValid() {
			vb.FieldByName(fieldName).Set(va.Field(i))
		}
	}
}
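
// Illustrative sketch only (not part of this change): CopySameFields matches
// fields purely by name, so any shared field must have the same type in both
// structs or reflect.Value.Set will panic. srcExample, dstExample, and
// copySameFieldsExample are hypothetical names.
type srcExample struct {
	Namespace  string
	TagVersion string
}

type dstExample struct {
	Namespace  string
	TagVersion string
	Extra      string // untouched: no counterpart in srcExample
}

func copySameFieldsExample() dstExample {
	src := &srcExample{Namespace: "uavcloud-dev", TagVersion: "5.2.0"}
	dst := &dstExample{}
	CopySameFields(src, dst)
	return *dst // Namespace and TagVersion copied; Extra stays ""
}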

File diff suppressed because it is too large

View File

@@ -0,0 +1,336 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-platform
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "platform",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: zjjt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "10.100.2.121:8888",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}

View File

@@ -0,0 +1,307 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 30554
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [ "" ]
resources: [ "secrets" ]
resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf" ]
verbs: [ "get", "update", "delete" ]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [ "" ]
resources: [ "configmaps" ]
resourceNames: [ "kubernetes-dashboard-settings" ]
verbs: [ "get", "update" ]
# Allow Dashboard to get metrics.
- apiGroups: [ "" ]
resources: [ "services" ]
resourceNames: [ "heapster", "dashboard-metrics-scraper" ]
verbs: [ "proxy" ]
- apiGroups: [ "" ]
resources: [ "services/proxy" ]
resourceNames: [ "heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper" ]
verbs: [ "get" ]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: [ "metrics.k8s.io" ]
resources: [ "pods", "nodes" ]
verbs: [ "get", "list", "watch" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: 10.100.2.121:8033/cmii/dashboard:v2.0.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: { }
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: 10.100.2.121:8033/cmii/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: { }
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system

View File

@@ -0,0 +1,263 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: zjjt
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: zjjt
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
data:
EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc.cluster.local:443
EMQX_NAME: helm-emqxs
EMQX_CLUSTER__DISCOVERY: k8s
EMQX_CLUSTER__K8S__APP_NAME: helm-emqxs
EMQX_CLUSTER__K8S__SERVICE_NAME: helm-emqxs-headless
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
  EMQX_CLUSTER__K8S__NAMESPACE: zjjt
EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: zjjt
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
data:
emqx_auth_username.conf: |-
auth.user.1.username = cmlc
auth.user.1.password = odD8#Ve7.B
auth.user.password_hash = sha256
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
{emqx_auth_username,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: zjjt
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
affinity: { }
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: 10.100.2.121:8033/cmii/emqx:5.5.1
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: { }
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf"
subPath: emqx_auth_username.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_username.conf
path: emqx_auth_username.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: zjjt
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: zjjt
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: zjjt
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: zjjt
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: zjjt
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
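
A quick smoke test of the broker through its NodePort, assuming mosquitto-clients is installed on a host that can reach a cluster node (<node-ip> is a placeholder); the credentials come from emqx_auth_username.conf above:

# publish one message through the NodePort listener (31883 -> 1883)
mosquitto_pub -h <node-ip> -p 31883 -u cmlc -P 'odD8#Ve7.B' -t test/hello -m 'hello emqx'
# the EMQX dashboard is exposed on NodePort 38085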

File diff suppressed because it is too large

View File

@@ -0,0 +1,544 @@
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: zjjt
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/green)$ $1/ redirect;
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/inspection)$ $1/ redirect;
rewrite ^(/park)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/cms)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/hyper)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/mws-admin)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/splice-visual)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
rewrite ^(/communication)$ $1/ redirect;
rewrite ^(/infrastructure)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
spec:
rules:
- host: fake-domain.zjjt.io
http:
paths:
- path: /inspection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /green/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /park/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /emersupport/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /infrastructure/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /cms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms
servicePort: 9528
- path: /cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /hyper/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hyperspectral
servicePort: 9528
- path: /logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /mws-admin/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws-admin
servicePort: 9528
- path: /oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /splice-visual/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice-visual
servicePort: 9528
- path: /traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
- path: /communication/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /fireRescue/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: zjjt
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: cmii-admin-data.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-data
servicePort: 8080
- host: cmii-admin-gateway.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- host: cmii-admin-user.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-user
servicePort: 8080
- host: cmii-open-gateway.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- host: cmii-uav-airspace.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-airspace
servicePort: 8080
- host: cmii-uav-brain.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-brain
servicePort: 8080
- host: cmii-uav-clusters.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-clusters
servicePort: 8080
- host: cmii-uav-cms.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cms
servicePort: 8080
- host: cmii-uav-data-post-process.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-data-post-process
servicePort: 8080
- host: cmii-uav-developer.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-developer
servicePort: 8080
- host: cmii-uav-device.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-device
servicePort: 8080
- host: cmii-uav-gateway.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-industrial-portfolio
servicePort: 8080
- host: cmii-uav-kpi-monitor.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-kpi-monitor
servicePort: 8080
- host: cmii-uav-live.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-live
servicePort: 8080
- host: cmii-uav-cloud-live.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cloud-live
servicePort: 8080
- host: cmii-uav-logger.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-logger
servicePort: 8080
- host: cmii-uav-material-warehouse.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-material-warehouse
servicePort: 8080
- host: cmii-uav-mission.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mission
servicePort: 8080
- host: cmii-uav-monitor.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-monitor
servicePort: 8080
- host: cmii-uav-mqtthandler.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mqtthandler
servicePort: 8080
- host: cmii-uav-notice.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-notice
servicePort: 8080
- host: cmii-uav-oauth.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-oauth
servicePort: 8080
- host: cmii-uav-process.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-security-system.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-security-system
servicePort: 8080
- host: cmii-uav-surveillance.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-surveillance
servicePort: 8080
- host: cmii-uav-user.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-user
servicePort: 8080
- host: cmii-uav-waypoint.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-waypoint
servicePort: 8080
- host: cmii-uav-alarm.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-alarm
servicePort: 8080
- host: cmii-uav-emergency.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-emergency
servicePort: 8080
- host: cmii-uav-integration.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-integration
servicePort: 8080
- host: cmii-suav-supervision.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-supervision
servicePort: 8080
- host: cmii-uav-gis-server.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gis-server
servicePort: 8080
- host: cmii-uav-grid-datasource.uavcloud-zjjt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-datasource
servicePort: 8080
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: zjjt
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
spec:
rules:
- host: fake-domain.zjjt.io
http:
paths:
- path: /oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
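
All three Ingresses match on the same Host, so routing can be smoke-tested from outside with an explicit Host header; /api/health is a hypothetical backend path, substitute a real one:

curl -H 'Host: fake-domain.zjjt.io' http://<ingress-node-ip>/api/health

Note that rewrite-target: /$1 strips the matched prefix, so a request for /api/foo reaches cmii-uav-gateway as /foo.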

View File

@@ -0,0 +1,75 @@
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: zjjt
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
type: ClusterIP
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: zjjt
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.5.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: { }
containers:
- name: helm-mongo
image: 10.100.2.121:8033/cmii/mongo:5.0
resources: { }
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
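
A connectivity check from inside the cluster, assuming the single-replica StatefulSet yields pod helm-mongo-0 and that the mongo:5.0 image ships mongosh; the root credentials are the MONGO_INITDB_* values above, which live in the admin database:

kubectl -n zjjt exec -it helm-mongo-0 -- \
  mongosh -u cmlc -p 'REdPza8#oVlt' --authenticationDatabase admin --eval 'db.adminCommand({ ping: 1 })'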

View File

@@ -0,0 +1,421 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: zjjt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: octopus
annotations:
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: zjjt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: zjjt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
    innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: zjjt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
  create_users_grants_core.sql: |-
    create user zyly@'%' identified by 'Cmii@451315';
    grant select on *.* to zyly@'%';
    create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
    grant all on *.* to zyly_qc@'%';
    create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
    grant all on *.* to k8s_admin@'%';
    create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
    grant all on *.* to audit_dba@'%';
    create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
    GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
    create user monitor@'%' identified by 'PL3#nGtrWbf-';
    grant REPLICATION CLIENT on *.* to monitor@'%';
    flush privileges;
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: zjjt
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: zjjt
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: zjjt
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: zjjt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations:
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: zjjt
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: zjjt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations:
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: zjjt
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: zjjt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: zjjt
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
serviceAccountName: helm-mysql
affinity: { }
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: 10.100.2.121:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: 10.100.2.121:8033/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: { }
requests: { }
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv
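
A readiness check mirroring the probes above, run inside the pod (helm-mysql-0, from the single-replica StatefulSet) so the root password is taken from the injected env var:

kubectl -n zjjt exec -it helm-mysql-0 -- \
  bash -c 'mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -e "SELECT VERSION();"'
# the Secret values are plain base64, e.g.: echo 'UXpmWFFoZDNiUQ==' | base64 -d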

View File

@@ -0,0 +1,126 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: zjjt
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: zjjt
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38989
- port: 9848
name: server12
targetPort: 9848
nodePort: 38912
- port: 9849
name: server23
targetPort: 9849
nodePort: 38923
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: zjjt
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.5.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 5.5.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: { }
containers:
- name: nacos-server
image: 10.100.2.121:8033/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
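
Once the cmii_nacos_config schema exists in helm-mysql, the console should answer on the NodePort; a minimal sketch, assuming the health endpoints of Nacos 2.x and a reachable node IP:

curl http://<node-ip>:38989/nacos/                               # console UI
curl http://<node-ip>:38989/nacos/v1/console/health/readiness    # readiness probe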

View File

@@ -0,0 +1,36 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name of the StorageClass in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
containers:
- name: test-pod
image: 10.100.2.121:8033/cmii/busybox
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
      claimName: test-claim # must match the PVC name above
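
Applying this manifest exercises the whole dynamic-provisioning path end to end (the file name below is hypothetical):

kubectl apply -f nfs-test.yaml
kubectl get pvc test-claim   # should reach Bound
kubectl get pod test-pod     # should reach Completed
# then look for the NFS-CREATE-SUCCESS marker on the NFS export itself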

View File

@@ -0,0 +1,112 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # set to the namespace where the provisioner is deployed; the same applies below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [ "" ]
resources: [ "persistentvolumes" ]
verbs: [ "get", "list", "watch", "create", "delete" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumeclaims" ]
verbs: [ "get", "list", "watch", "update" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "update", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment below
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # must match the namespace used in the RBAC objects above
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 10.100.2.121:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 192.168.1.1
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 192.168.1.1
path: /var/lib/docker/nfs_data
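
After deploying, both the provisioner pod and the StorageClass it serves can be verified with:

kubectl -n kube-system get pods -l app=nfs-client-provisioner
kubectl get storageclass nfs-prod-distribute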

View File

@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: zjjt
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: zjjt
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: zjjt
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: zjjt
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.5.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
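
All four claims target the nfs-prod-distribute class, so once the provisioner above is running they should bind with no manual PV creation:

kubectl -n zjjt get pvc   # nfs-backend-log-pvc, helm-emqxs, helm-mongo and helm-rabbitmq should all be Bound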

View File

@@ -0,0 +1,654 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: zjjt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: zjjt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: zjjt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: zjjt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: zjjt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: zjjt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
      targetPort: dashboard
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: zjjt
publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: zjjt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 35675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: zjjt
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: zjjt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: zjjt
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zjjt
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
serviceAccountName: helm-rabbitmq
affinity: { }
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: 10.100.2.121:8033/cmii/bitnami-shell:10-debian-10-r140
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: { }
requests: { }
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: 10.100.2.121:8033/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: { }
requests: { }
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq
---
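
A post-deploy sanity check, assuming the StatefulSet yields pod helm-rabbitmq-0; the management API is reachable on NodePort 35675 with the admin credentials from rabbitmq.conf:

kubectl -n zjjt exec helm-rabbitmq-0 -- rabbitmq-diagnostics -q cluster_status
curl -u admin:'nYcRN91r._hj' http://<node-ip>:35675/api/overview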

View File

@@ -0,0 +1,584 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: SUPREME
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: SUPREME
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: SUPREME
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: SUPREME
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: SUPREME
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: SUPREME
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: SUPREME
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: SUPREME
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: SUPREME
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: SUPREME
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: SUPREME
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: SUPREME
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: SUPREME
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: SUPREME
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: SUPREME
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: SUPREME
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: SUPREME
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: SUPREME
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: SUPREME
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: SUPREME
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: SUPREME
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: SUPREME
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: SUPREME
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: { }
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 10.100.2.121:8033/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "100m"
memory: 1Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: tmp
emptyDir: { }
- name: redis-data
emptyDir: { }
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: SUPREME
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: SUPREME
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: SUPREME
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: SUPREME
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 10.100.2.121:8033/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.SUPREME.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "100m"
memory: 1Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: redis-data
emptyDir: { }
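A quick sanity check for the rendered master/replica pair is a password-authenticated PING against the master Service. A minimal sketch, assuming the public github.com/redis/go-redis/v9 client and a placeholder for the redis-password Secret value (both assumptions, not part of this chart):

package main

import (
    "context"
    "fmt"

    "github.com/redis/go-redis/v9"
)

func main() {
    // Addr matches the helm-redis-master Service rendered above; replace
    // CHANGE_ME with the redis-password key of the helm-redis Secret.
    rdb := redis.NewClient(&redis.Options{
        Addr:     "helm-redis-master.SUPREME.svc.cluster.local:6379",
        Password: "CHANGE_ME",
    })
    defer rdb.Close()
    pong, err := rdb.Ping(context.Background()).Result()
    fmt.Println(pong, err)
}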

View File

@@ -0,0 +1,499 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: zjjt
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 30935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://10.100.2.121:8888;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: zjjt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
nodePort: 30935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30557
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: zjjt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: zjjt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: zjjt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
creationTimestamp: null
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: 10.100.2.121:8033/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 30935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 10.100.2.121
resources:
limits:
cpu: 1200m
memory: 6Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: zjjt/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: zjjt/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: 10.100.2.121:8033/cmii/cmii-srs-oss-adaptor:2023-SA
env:
- name: OSS_ENDPOINT
value: 'http://10.100.2.116:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 1200m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: zjjt/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: zjjt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
creationTimestamp: null
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: 10.100.2.121:8033/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/ping
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/ping
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: zjjt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: zjjt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: zjjt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 5.5.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: developer
password: N@cos14Good
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 5.5.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 5.5.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://10.100.2.121:30935'
rtsp: 'rtsp://10.100.2.121:30554'
srt: 'srt://10.100.2.121:30556'
flv: 'http://10.100.2.121:30500'
hls: 'http://10.100.2.121:30500'
rtc: 'webrtc://10.100.2.121:30557'
replay: 'https://10.100.2.121:30333'
minio:
endpoint: http://10.100.2.116:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls
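The live.engine.endpoint above targets the SRS HTTP API exposed on port 1985; a minimal reachability check, assuming in-cluster DNS and SRS's /api/v1/versions endpoint:

package main

import (
    "fmt"
    "io"
    "net/http"
)

func main() {
    // helm-live-srs-svc is the ClusterIP Service defined earlier in this file.
    resp, err := http.Get("http://helm-live-srs-svc:1985/api/v1/versions")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, _ := io.ReadAll(resp.Body)
    fmt.Println(resp.Status, string(body))
}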

View File

@@ -5,6 +5,7 @@ go 1.22.1
require (
github.com/docker/docker v20.10.17+incompatible
github.com/docker/go-units v0.4.0
github.com/go-playground/validator/v10 v10.19.0
github.com/klauspost/pgzip v1.2.6
github.com/minio/minio-go v6.0.14+incompatible
github.com/mittwald/goharbor-client/v5 v5.5.3
@@ -13,7 +14,6 @@ require (
k8s.io/api v0.29.1
k8s.io/apimachinery v0.29.1
k8s.io/client-go v0.29.1
sigs.k8s.io/yaml v1.4.0
wdd.io/agent-common v0.0.0
)
@@ -27,6 +27,7 @@ require (
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -40,6 +41,8 @@ require (
github.com/go-openapi/strfmt v0.21.3 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-openapi/validate v0.22.1 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/goharbor/harbor/src v0.0.0-20230220075213-6015b3efa7d0 // indirect
github.com/golang/protobuf v1.5.3 // indirect
@@ -51,6 +54,7 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.13.6 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -91,4 +95,5 @@ require (
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)

View File

@@ -26,6 +26,8 @@ github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -69,6 +71,14 @@ github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4=
github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
@@ -151,6 +161,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=

View File

@@ -24,6 +24,8 @@ import (
var apiClient = newClient()
var log = logger.Log
const OfflineImageGzipFolderPrefix = "/root/octopus_image/"
func newClient() *client.Client {
apiClient, err := client.NewClientWithOpts(client.FromEnv)
@@ -140,7 +142,7 @@ func PruneAllCmiiImages() (errorRemoveImageNameList []string) {
for _, imageSummary := range imageGetAll {
for _, repoTag := range imageSummary.RepoTags {
if strings.HasPrefix(repoTag, image2.CmiiHarborPrefix) {
if strings.HasPrefix(repoTag, image2.CmiiHarborPrefix) || strings.HasPrefix(repoTag, "harbor.wdd.io") || strings.Contains(repoTag, ":8033") {
for _, tag := range imageSummary.RepoTags {
_, err := apiClient.ImageRemove(context.TODO(), imageSummary.ID, types.ImageRemoveOptions{
Force: true,
@@ -179,7 +181,7 @@ func TagFromSourceToTarget(sourceImageName, targetImageName string) bool {
return true
}
func PushToOctopusKindHarbor(targetImageName string) (pushResult io.ReadCloser) {
func UploadToOctopusKindHarbor(targetImageName string) (pushResult io.ReadCloser) {
if GetByName(targetImageName) == nil {
log.ErrorF("[ImagePushToOctopusKindHarbor] - %s not exits !", targetImageName)
@@ -200,6 +202,17 @@ func PushToOctopusKindHarbor(targetImageName string) (pushResult io.ReadCloser)
return pushResult
}
func UploadToHarbor(targetImageName string) (uploadOK bool) {
pushResult := UploadToOctopusKindHarbor(targetImageName)
if pushResult == nil {
return false
}
defer pushResult.Close()
// drain the push progress stream so the upload runs to completion
scanner := bufio.NewScanner(pushResult)
for scanner.Scan() {
}
fmt.Println()
log.InfoF("[UploadToHarbor] - upload %s success!", targetImageName)
fmt.Println()
return true
}
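// Usage sketch (illustrative, not part of this commit): re-tag a local image
// into the kind harbor, then upload it with the helpers defined above.
//
//	if TagFromSourceToTarget("nginx:latest", "harbor.wdd.io:8033/cmii/nginx:latest") {
//		UploadToHarbor("harbor.wdd.io:8033/cmii/nginx:latest")
//	}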
// TagFromListAndPushToCHarbor must support references such as harbor.cdcyy.cn, ip:8033, rancher/rancher:v2.5.7 and nginx:latest
func TagFromListAndPushToCHarbor(referenceImageList []string, targetHarborHost string) (errorPushImageNameList []string) {
@@ -237,7 +250,7 @@ func TagFromListAndPushToCHarbor(referenceImageList []string, targetHarborHost s
fmt.Println(targetImageName)
//if TagFromSourceToTarget(cmiiImageFullName, targetImageName) {
// pushResult := PushToOctopusKindHarbor(targetImageName)
// pushResult := UploadToOctopusKindHarbor(targetImageName)
// if pushResult == nil {
// errorPushImageNameList = append(errorPushImageNameList, cmiiImageFullName)
// log.InfoF("[ImageTagFromListAndPushToCHarbor] - push of %s error error !", targetImageName)
@@ -276,7 +289,7 @@ func PullFromCmiiHarbor(imageName string) (pullResult io.ReadCloser) {
func PullFromCmiiHarborByMap(imageVersionMap map[string]string, silentMode bool) (fullImageNameList, errorPullImageList []string) {
fullImageNameList = ConvertCMiiImageMapToList(imageVersionMap)
fullImageNameList = CmiiImageMapToFullNameList(imageVersionMap)
return fullImageNameList, PullFromFullNameList(fullImageNameList)
}
@@ -358,11 +371,13 @@ func PullFromListAndCompressSplit(fullImageNameList []string, gzipFolder string)
for _, image := range fullImageNameList {
if !SaveToTarGZ(image, gzipFolder) {
ok, path := SaveToGzipFile(image, gzipFolder)
if !ok {
errorGzipImageList = append(errorGzipImageList, image)
continue
}
tarGzipFileNameList = append(tarGzipFileNameList, image2.ImageFullNameToGzipFileName(image))
tarGzipFileNameList = append(tarGzipFileNameList, path)
}
utils.BeautifulPrintListWithTitle(tarGzipFileNameList, "image gzip name list")
@@ -370,20 +385,20 @@ func PullFromListAndCompressSplit(fullImageNameList []string, gzipFolder string)
}
// LoadFromGzipFilePath loads an image from the full path of its gzip file
func LoadFromGzipFilePath(gzipFullPath string) bool {
openFile, err := os.OpenFile(gzipFullPath, 0, fs.ModePerm)
func LoadFromGzipFilePath(gzipFileNameFullPath string) bool {
openFile, err := os.OpenFile(gzipFileNameFullPath, 0, fs.ModePerm)
if err != nil {
log.ErrorF("[ImageLoadFromFile] - failed to open file %s, error is %s", gzipFullPath, err.Error())
log.ErrorF("[ImageLoadFromFile] - failed to open file %s, error is %s", gzipFileNameFullPath, err.Error())
return false
}
loadResponse, err := apiClient.ImageLoad(context.TODO(), openFile, true)
if err != nil {
log.ErrorF("[ImageLoadFromFile] - load error %s, error is %s", gzipFullPath, err.Error())
log.ErrorF("[ImageLoadFromFile] - load error %s, error is %s", gzipFileNameFullPath, err.Error())
return false
}
log.InfoF("[ImageLoadFromFile] - load of %s, result is %s", gzipFullPath, strconv.FormatBool(loadResponse.JSON))
log.InfoF("[ImageLoadFromFile] - load of %s, result is %s", gzipFileNameFullPath, strconv.FormatBool(loadResponse.JSON))
scanner := bufio.NewScanner(loadResponse.Body)
for scanner.Scan() {
@@ -418,18 +433,19 @@ func LoadFromFolderPath(folderPath string) (errorLoadImageNameList []string) {
return errorLoadImageNameList
}
func SaveToTarGZ(targetImageName, folderPathPrefix string) bool {
// SaveToGzipFile compresses the image with the given full name into a gzip file
func SaveToGzipFile(imageFullName, folderPathPrefix string) (gzipOK bool, gzipImageFileFullPath string) {
imageGetByName := GetByName(targetImageName)
imageGetByName := GetByName(imageFullName)
if imageGetByName == nil {
log.ErrorF("[ImageSaveToTarGZ] - %s not exits", targetImageName)
return false
log.ErrorF("[ImageSaveToTarGZ] - %s not exits", imageFullName)
return false, ""
}
imageSaveTarStream, err := apiClient.ImageSave(context.TODO(), imageGetByName.RepoTags)
if err != nil {
log.ErrorF("[ImageSaveToTarGZ] - image save error %s", err.Error())
return false
return false, ""
}
var realImageTag string
@@ -440,19 +456,25 @@ func SaveToTarGZ(targetImageName, folderPathPrefix string) bool {
}
}
gzipImageFile := image2.ImageFullNameToGzipFileName(realImageTag)
gzipImageFileFullPath = image2.ImageFullNameToGzipFileName(realImageTag)
if !strings.HasSuffix(folderPathPrefix, "/") {
folderPathPrefix += "/"
}
_ = os.MkdirAll(folderPathPrefix, os.ModeDir)
gzipImageFile = folderPathPrefix + gzipImageFile
log.InfoF("[ImageSaveToTarGZ] - start to save [%s] to [%s]", realImageTag, gzipImageFile)
_ = os.Remove(gzipImageFile)
tarFile, err := os.Create(gzipImageFile)
// build the full path of the gzip archive
gzipImageFileFullPath = folderPathPrefix + gzipImageFileFullPath
log.InfoF("[ImageSaveToTarGZ] - start to save [%s] to [%s]", realImageTag, gzipImageFileFullPath)
// remove any stale gzip file first
_ = os.Remove(gzipImageFileFullPath)
// create the gzip file
tarFile, err := os.Create(gzipImageFileFullPath)
if err != nil {
log.ErrorF("[ImageSaveToTarGZ] - error create gzip %s file ! => %s ", gzipImageFile, err.Error())
return false
log.ErrorF("[ImageSaveToTarGZ] - error create gzip %s file ! => %s ", gzipImageFileFullPath, err.Error())
return false, ""
}
defer tarFile.Close()
@@ -466,13 +488,14 @@ func SaveToTarGZ(targetImageName, folderPathPrefix string) bool {
// Copy the tar archive to the gzip writer.
if _, err := io.Copy(gw, imageSaveTarStream); err != nil {
log.ErrorF("[ImageSaveToTarGZ] - failed to copy tar archive to gzip writer: %s", err.Error())
return false
return false, ""
}
return true
// success
return true, gzipImageFileFullPath
}
func ConvertCMiiImageMapToList(cmiiImageVersionMap map[string]string) (fullImageNameList []string) {
func CmiiImageMapToFullNameList(cmiiImageVersionMap map[string]string) (fullImageNameList []string) {
for image, tag := range cmiiImageVersionMap {
s := image2.CmiiHarborPrefix + image + ":" + tag
@@ -482,6 +505,39 @@ func ConvertCMiiImageMapToList(cmiiImageVersionMap map[string]string) (fullImage
return fullImageNameList
}
func CmiiImageMapFromGzipFolder(gzipFileFolder string) (cmiiImageVersionMap map[string]string) {
allFileInFolder, err := utils.ListAllFileInFolder(gzipFileFolder)
if err != nil {
return nil
}
cmiiImageVersionMap = make(map[string]string)
for _, gzipFileName := range allFileInFolder {
log.DebugF("gzip file name is %s", gzipFileName)
imageName, imageTag := image2.GzipFileNameToImageNameAndTag(gzipFileName)
cmiiImageVersionMap[imageName] = imageTag
}
return cmiiImageVersionMap
}
func FrontendBackendImageMapFromCmiiImageMap(cmiiImageVersionMap map[string]string) (frontendImageVersionMap, backendImageVersionMap map[string]string) {
frontendImageVersionMap = make(map[string]string)
backendImageVersionMap = make(map[string]string)
for imageName, imageTag := range cmiiImageVersionMap {
if strings.Contains(imageName, "platform") {
frontendImageVersionMap[imageName] = imageTag
} else {
backendImageVersionMap[imageName] = imageTag
}
}
return frontendImageVersionMap, backendImageVersionMap
}
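// Classification note for FrontendBackendImageMapFromCmiiImageMap above
// (illustrative): any image name containing "platform" is treated as a
// frontend build, e.g. "cmii-uav-platform" maps to frontend while
// "cmii-uav-gateway" falls through to backend.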
func loginToDockerHub(HarborFullHost string) {
if HarborFullHost == "" {

View File

@@ -8,6 +8,7 @@ import (
"wdd.io/agent-common/assert"
"wdd.io/agent-common/image"
"wdd.io/agent-common/utils"
"wdd.io/agent-operator/real_project/zjjt"
)
func TestGetRunningContainer(t *testing.T) {
@@ -87,7 +88,7 @@ func TestImagePushToOctopusKindHarbor(t *testing.T) {
assert.Equal(t, target, true, "image re-tag error !")
// push
pushResult := PushToOctopusKindHarbor(newTag)
pushResult := UploadToOctopusKindHarbor(newTag)
defer pushResult.Close()
scanner := bufio.NewScanner(pushResult)
@@ -108,7 +109,7 @@ func TestImageLoadFromFile(t *testing.T) {
func TestImageSaveToTarGZ(t *testing.T) {
image := "harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:4.1.6-beta"
imageSaveToTarGZ := SaveToTarGZ(image, "/home/wdd/IdeaProjects/ProjectOctopus/cmii_operator/log")
imageSaveToTarGZ, _ := SaveToGzipFile(image, "/home/wdd/IdeaProjects/ProjectOctopus/cmii_operator/log")
assert.Equal(t, imageSaveToTarGZ, true, "image save to tar gz file error !")
}
@@ -148,7 +149,7 @@ func TestImageTagFromSourceToTarget(t *testing.T) {
targetImageName := "harbor.wdd.io:8033/cmii/srs:v5.0.195"
if TagFromSourceToTarget(sourceImageName, targetImageName) {
pushResult := PushToOctopusKindHarbor(targetImageName)
pushResult := UploadToOctopusKindHarbor(targetImageName)
defer pushResult.Close()
scanner := bufio.NewScanner(pushResult)
@@ -187,7 +188,7 @@ func TestSaveSpecificImageToGzipFile(t *testing.T) {
// image pull success
log.InfoF("image should have pulled successful ! => %s", imageFullName)
if !SaveToTarGZ(imageFullName, imageGzipFilePathPrefix) {
if ok, _ := SaveToGzipFile(imageFullName, imageGzipFilePathPrefix); !ok {
log.ErrorF("image save to gzip file error ! => %s", imageFullName)
return
}
@@ -195,3 +196,17 @@ func TestSaveSpecificImageToGzipFile(t *testing.T) {
}
}
func TestConvertCmiiImageMapFromGzipFolder(t *testing.T) {
versionMap := CmiiImageMapFromGzipFolder(OfflineImageGzipFolderPrefix)
utils.BeautifulPrint(versionMap)
}
func TestFrontendBackendImageMapFromCmiiImageMap(t *testing.T) {
frontendImageVersionMap, backendImageVersionMap := FrontendBackendImageMapFromCmiiImageMap(zjjt.CmiiImageMap)
utils.BeautifulPrint(frontendImageVersionMap)
utils.BeautifulPrint(backendImageVersionMap)
}

View File

@@ -33,7 +33,7 @@ func RealProjectRunner() {
op := CmiiK8sOperator{}
op.BuildCurrentClientFromConfig(readFile)
CmiiOperator = op
DefaultCmiiOperator = op
// ops
@@ -77,12 +77,12 @@ func CmiiRunner() {
var DLTUHelp = `
DLTUHelp
dltu [ossUrlPrefix] [ossFileName] [localGzipFolder] [harborHostFullName]
dltu [ossUrlPrefix] [ossFileName] [localGzipFolder] [harborHostFullName] [namespace]
`
func main() {
// C:\Users\wddsh\go\bin\gox.exe -osarch="linux/amd66" -output "build/operator_{{.OS}}_{{.Arch}}"
// C:\Users\wddsh\go\bin\gox.exe -osarch="linux/amd64" -output "build/operator_{{.OS}}_{{.Arch}}"
//RealProjectRunner()
@@ -113,7 +113,7 @@ func main() {
result = append(result, text)
}
if len(result) != 5 {
if len(result) != 6 {
fmt.Println("input error!")
fmt.Printf(DLTUHelp)
return
@@ -123,21 +123,31 @@ func main() {
ossFileName := result[2]
localGzipFolder := result[3]
harborHostFullName := result[4]
namespace := result[5]
fmt.Println("ossUrlPrefix: ", ossUrlPrefix)
fmt.Println("ossFileName: ", ossFileName)
fmt.Println("localGzipFolder: ", localGzipFolder)
fmt.Println("harborHostFullName: ", harborHostFullName)
fmt.Println("namespace: ", namespace)
fmt.Println()
var downloadFromOss bool
if ossFileName != "" {
downloadFromOss = true
downloadFromOss := true
if ossFileName == "0" {
downloadFromOss = false
}
DownloadLoadTagPush(downloadFromOss, ossUrlPrefix, ossFileName, localGzipFolder, harborHostFullName)
// DLTU
targetImageFullNameList := DownloadLoadTagUpload(downloadFromOss, ossUrlPrefix, ossFileName, localGzipFolder, harborHostFullName)
// download complete
// decide whether the running deployments need the new tags
if namespace != "" {
for _, targetImageFullName := range targetImageFullNameList {
if !DefaultCmiiOperator.DeploymentUpdateTagByImageFullName(namespace, targetImageFullName) {
fmt.Printf("[Update] update [%s] [%s] failed", namespace, targetImageFullName)
}
}
}
fmt.Println()
}
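// Example invocation (all arguments hypothetical):
//
//	dltu http://10.100.2.116:9000/images image-list.tar.gz /root/octopus_image 10.100.2.121:8033 uavcloud-demo
//
// Passing "0" as ossFileName skips the OSS download; a non-empty namespace
// additionally rolls the uploaded tags out through
// DefaultCmiiOperator.DeploymentUpdateTagByImageFullName.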

View File

@@ -0,0 +1,64 @@
package zjjt
var CmiiImageMap = map[string]string{
"cmii-admin-data": "5.5.0",
"cmii-admin-gateway": "5.5.0",
"cmii-admin-user": "5.5.0",
"cmii-app-release": "4.2.0-validation",
"cmii-live-operator": "5.2.0",
"cmii-open-gateway": "5.5.0",
"cmii-srs-oss-adaptor": "2023-SA",
"cmii-suav-platform-supervision": "5.5.0",
"cmii-suav-platform-supervisionh5": "5.5.0",
"cmii-suav-supervision": "5.4.0-032501",
"cmii-uav-airspace": "5.5.0",
"cmii-uav-alarm": "5.5.0",
"cmii-uav-autowaypoint": "4.2.0-beta",
"cmii-uav-brain": "5.5.0",
"cmii-uav-cloud-live": "5.5.0",
"cmii-uav-cms": "5.5.0",
"cmii-uav-data-post-process": "5.5.0",
"cmii-uav-developer": "5.5.0",
"cmii-uav-device": "5.5.0",
"cmii-uav-emergency": "5.3.0",
"cmii-uav-gateway": "5.5.0",
"cmii-uav-gis-server": "5.5.0",
"cmii-uav-grid-datasource": "5.2.0-24810",
"cmii-uav-grid-engine": "5.1.0",
"cmii-uav-grid-manage": "5.1.0",
"cmii-uav-industrial-portfolio": "5.5.0-041801",
"cmii-uav-integration": "5.5.0-0419",
"cmii-uav-kpi-monitor": "5.5.0",
"cmii-uav-logger": "5.5.0",
"cmii-uav-material-warehouse": "5.5.0",
"cmii-uav-mission": "5.5.0",
"cmii-uav-mqtthandler": "5.5.0",
"cmii-uav-multilink": "5.5.0",
"cmii-uav-notice": "5.5.0",
"cmii-uav-oauth": "5.5.0",
"cmii-uav-platform": "5.5.0",
"cmii-uav-platform-ai-brain": "5.5.0",
"cmii-uav-platform-armypeople": "5.5.0",
"cmii-uav-platform-base": "5.4.0",
"cmii-uav-platform-cms-portal": "5.5.0",
"cmii-uav-platform-detection": "5.5.0",
"cmii-uav-platform-jiangsuwenlv": "4.1.3-jiangsu-0427",
"cmii-uav-platform-logistics": "5.5.0",
"cmii-uav-platform-media": "5.5.0",
"cmii-uav-platform-multiterminal": "5.5.0",
"cmii-uav-platform-mws": "5.5.0",
"cmii-uav-platform-oms": "5.5.0",
"cmii-uav-platform-open": "5.5.0-0419",
"cmii-uav-platform-qinghaitourism": "4.1.0-21377-0508",
"cmii-uav-platform-security": "5.5.0",
"cmii-uav-platform-securityh5": "5.5.0",
"cmii-uav-platform-share": "5.5.0",
"cmii-uav-platform-splice": "5.5.0",
"cmii-uav-platform-threedsimulation": "5.2.0-21392",
"cmii-uav-process": "5.5.0",
"cmii-uav-surveillance": "5.5.0",
"cmii-uav-threedsimulation": "5.5.0",
"cmii-uav-tower": "5.5.0",
"cmii-uav-user": "5.5.0",
"cmii-uav-waypoint": "5.5.0",
}

View File

@@ -0,0 +1,65 @@
package zjjt
var RealImagePullList = []string{
"harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-data:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-user:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:5.4.0-032501",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cms:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:5.3.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mission:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-process:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-device:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-brain:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-open-gateway:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:5.5.0-041801",
"harbor.cdcyy.com.cn/cmii/cmii-app-release:4.2.0-validation",
"harbor.cdcyy.com.cn/cmii/cmii-uav-tower:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:4.2.0-beta",
"harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-logger:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-notice:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:5.2.0-24810",
"harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-integration:5.5.0-0419",
"harbor.cdcyy.com.cn/cmii/cmii-uav-user:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-developer:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:5.5.0-0419",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:4.1.0-21377-0508",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:5.5.0",
"harbor.cdcyy.com.cn/cmii/ossrs/srs:v5.0.195",
"harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
"harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA",
}

View File

@@ -0,0 +1,23 @@
2024-04-18-17-20-00 uavcloud-demo cmii-uav-integration 5.5.0 5.5.0-0418
2024-04-19-09-30-00 uavcloud-demo cmii-uav-platform-open 5.5.0 5.5.0-0419
2024-04-22-09-18-00 uavcloud-demo cmii-uav-airspace 5.5.0 5.5.0-0422
2024-04-22-14-20-00 uavcloud-demo cmii-uav-airspace 5.5.0-0422 5.5.0-042201
2024-04-22-16-57-00 uavcloud-demo cmii-uav-airspace 5.5.0-042201 5.5.0-042202
2024-04-23-09-27-00 uavcloud-demo cmii-uav-device 5.5.0 5.5.0-042301
2024-04-23-11-15-00 uavcloud-demo cmii-uav-platform 5.5.0 5.5.0-042301
2024-04-23-11-17-00 uavcloud-demo cmii-suav-platform-supervision 5.5.0 5.5.0-042301
2024-04-23-16-28-06 uavcloud-dev cmii-suav-platform-supervision 5.2.0-test 5.5.0-042301
2024-04-23-17-35-00 uavcloud-dev cmii-uav-platform-armypeople 5.5.0-validation 5.5.0-042301
2024-04-23-17-36-00 uavcloud-demo cmii-uav-platform-armypeople 5.5.0-042201 5.5.0-042301
2024-04-24-12-00-00 uavcloud-demo cmii-uav-platform 5.5.0-042301 5.5.0-042401
2024-04-24-12-00-07 uavcloud-demo cmii-uav-airspace 5.5.0-042202 5.5.0-042401
2024-04-24-12-01-47 uavcloud-demo cmii-uav-industrial-portfolio 5.5.0-042201 5.5.0-042401
2024-04-24-12-03-14 uavcloud-demo cmii-uav-surveillance 5.5.0 5.5.0-042401
2024-04-24-17-30-00 uavcloud-demo cmii-uav-platform 5.5.0-042401 5.5.0-042402
2024-04-24-17-30-07 uavcloud-demo cmii-uav-cloud-live 5.5.0 5.5.0-042401
2024-04-24-17-31-51 uavcloud-demo cmii-uav-mission 5.5.0 5.5.0-042401
2024-04-25-09-36-00 uavcloud-demo cmii-uav-platform 5.5.0-042402 5.5.0-042501
2024-04-25-09-37-12 uavcloud-demo cmii-uav-industrial-portfolio 5.5.0-042401 5.5.0-042501
2024-04-25-17-42-00 uavcloud-demo cmii-uav-platform 5.5.0-042501 5.5.0-042503
2024-04-25-17-42-06 uavcloud-demo cmii-uav-platform-splice 5.5.0 5.5.0-042501
2024-04-25-17-45-00 uavcloud-demo cmii-uav-data-post-process 5.5.0 5.5.0-042501

View File

@@ -0,0 +1,30 @@
Set-ExecutionPolicy RemoteSigned -Scope CurrentUser
$basePath = $PWD
$remoteHost = "10.100.2.121"
Write-Host "Current Running Path is $basePath"
Write-Host "Connecting to remote host is $remoteHost"
# This is a PowerShell script to run port_win64.exe with admin privileges and keep running in the background
Write-Host "Start the port forwarding !"
Start-Process -FilePath "$basePath\port_win64.exe" -ArgumentList "tcp listen:0.0.0.0:8888 conn:$remoteHost:8888" -Verb RunAs
Start-Process -FilePath "$basePath\port_win64.exe" -ArgumentList "tcp listen:0.0.0.0:8889 conn:$remoteHost:8889" -Verb RunAs
Start-Process -FilePath "$basePath\port_win64.exe" -ArgumentList "tcp listen:0.0.0.0:9000 conn:$remoteHost:9000" -Verb RunAs
Start-Process -FilePath "$basePath\port_win64.exe" -ArgumentList "tcp listen:0.0.0.0:30554 conn:$remoteHost:30554" -Verb RunAs
Start-Process -FilePath "$basePath\port_win64.exe" -ArgumentList "tcp listen:0.0.0.0:30557 conn:$remoteHost:30557" -Verb RunAs
Start-Process -FilePath "$basePath\port_win64.exe" -ArgumentList "tcp listen:0.0.0.0:38989 conn:$remoteHost:38989" -Verb RunAs
Start-Process -FilePath "$basePath\port_win64.exe" -ArgumentList "tcp listen:0.0.0.0:31935 conn:$remoteHost:31935" -Verb RunAs
Start-Process -FilePath "$basePath\port_win64.exe" -ArgumentList "udp listen:0.0.0.0:30090 conn:$remoteHost:30090" -Verb RunAs
C:\Users\SuperDD\Desktop\port_win64.exe tcp listen:10.250.0.20:8888 conn:10.100.2.121:8888
C:\Users\SuperDD\Desktop\port_win64.exe tcp listen:10.250.0.20:8889 conn:10.100.2.121:8889
C:\Users\SuperDD\Desktop\port_win64.exe tcp listen:10.250.0.20:9000 conn:10.100.2.121:9000
C:\Users\SuperDD\Desktop\port_win64.exe tcp listen:10.250.0.20:30554 conn:10.100.2.121:30554
C:\Users\SuperDD\Desktop\port_win64.exe tcp listen:10.250.0.20:30557 conn:10.100.2.121:30557
C:\Users\SuperDD\Desktop\port_win64.exe tcp listen:10.250.0.20:38989 conn:10.100.2.121:38989
C:\Users\SuperDD\Desktop\port_win64.exe tcp listen:10.250.0.20:30935 conn:10.100.2.121:30935
C:\Users\SuperDD\Desktop\port_win64.exe udp listen:10.250.0.20:30090 conn:10.100.2.121:30090

View File

@@ -38,13 +38,13 @@ public class TestImageSyncScheduler {
public void runImageSync() {
ArrayList<String> CmiiAppNameList = new ArrayList<>(List.of(
// "cmii-uav-process:5.4.0-041901"
// "cmii-uav-process:5.4.0-041901"1
));
ArrayList<String> ImageFullNameList = new ArrayList<>(List.of(
// "harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
// "harbor.cdcyy.com.cn/cmii/cmii/srs:v5.0.195"
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.3.0-cqly-042302"
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.3.0-cqly-042601"
));
Boolean downloadAndCompressOnly = false;