[agent-go]-简化Agent 剔除Harbor K8s Image相关的内容

This commit is contained in:
zeaslity
2025-01-22 15:09:43 +08:00
parent 4edaf9f35a
commit 0d3bb30eed
15 changed files with 2569 additions and 2666 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -12,6 +12,7 @@ import (
)
var OctopusAgentInstallPrefix = "/root/wdd/"
var nfsDataPath = "/var/lib/docker/nfs_data"
type BaseFunc interface {
@@ -500,6 +501,41 @@ func (op *AgentOsOperator) InstallDefaultSshBastion() (bool, []string) {
}
}
// ModifySSHConfigBastion overwrites /etc/ssh/sshd_config with the bundled
// default bastion configuration (beans.DefaultSshdConfig) and restarts sshd.
// The existing config is backed up once to /etc/ssh/sshd_config_wdd_back.
// Returns false with a log when writing the config or restarting sshd fails.
func (op *AgentOsOperator) ModifySSHConfigBastion() (bool, []string) {
	sshdConfigFile := "/etc/ssh/sshd_config"
	sshConfigBackupFile := "/etc/ssh/sshd_config_wdd_back"
	// Back up only on the first run so the pristine config is never clobbered.
	// NOTE(review): BasicAppendOverwriteContentToFile(content, target) appears to
	// write its first argument as literal content; here it receives the config
	// *path*, so the backup may contain the path string rather than the file
	// contents — confirm against the helper's contract.
	if !BasicFileExistAndNotNull(sshConfigBackupFile) {
		BasicAppendOverwriteContentToFile(sshdConfigFile, sshConfigBackupFile)
	}
	// The default config enables, among others:
	// AllowTcpForwarding yes, AllowAgentForwarding yes, X11Forwarding yes,
	// StrictMode no, PermitRootLogin yes, PasswordAuthentication yes,
	// PubkeyAuthentication yes
	if !BasicAppendOverwriteContentToFile(beans.DefaultSshdConfig, sshdConfigFile) {
		return false, []string{
			"error write default sshd config to sshd file",
		}
	}
	ok, resultLog := BasicSystemdRestart("sshd")
	if !ok {
		// BUGFIX: the restart log used to be discarded here; include it so the
		// caller can see why sshd failed to restart.
		return false, append([]string{"sshd restart failed ! please check !"}, resultLog...)
	}
	return true, resultLog
}
func (op *AgentOsOperator) removeDocker() [][]string {
removeDockerLine := append(op.RemoveCommandPrefix, []string{

View File

@@ -583,6 +583,34 @@ func BasicSystemdUp(serviceName string) (ok bool, resultLog []string) {
return resultOK, resultLog
}
// BasicSystemdRestart restarts a service through systemd, first checking that
// the unit file exists under /lib/systemd/system/ or /etc/systemd/system.
// A missing unit is reported as success (there is nothing to restart) together
// with an explanatory log, preserving the behaviour callers rely on.
func BasicSystemdRestart(serviceName string) (ok bool, resultLog []string) {
	if !strings.HasSuffix(serviceName, ".service") {
		serviceName = serviceName + ".service"
	}
	// Verify the unit file exists before invoking systemctl.
	existService := BasicFileExistInFolder(serviceName, "/lib/systemd/system/",
		"/etc/systemd/system")
	if !existService {
		return true, []string{
			serviceName,
			"该服务不存在!",
		}
	}
	// Run "systemctl restart <service>".
	restartCommand := []string{
		"systemctl",
		"restart",
		serviceName,
	}
	resultOK := PureResultSingleExecute(restartCommand)
	// BUGFIX: resultLog was previously never assigned on this path, so callers
	// always received a nil log; report the outcome explicitly.
	if resultOK {
		resultLog = []string{serviceName + " restart success"}
	} else {
		resultLog = []string{serviceName + " restart failed"}
	}
	return resultOK, resultLog
}
// BasicTransPipelineCommand 转换手写的管道命令为可执行的形式
func BasicTransPipelineCommand(pipelineString string) (pipelineCommand [][]string) {
@@ -839,10 +867,8 @@ func BasicAppendContentToFile(content string, targetFile string) bool {
// BasicAppendOverwriteContentToFile writes content to targetFile, replacing any
// previous contents: the file is removed first, then content is appended to the
// resulting fresh file. Returns the result of the underlying append.
func BasicAppendOverwriteContentToFile(content string, targetFile string) bool {
	// BUGFIX: a merge artifact left two os.Remove calls here (the second always
	// failed after the first succeeded). Remove once; a failure usually just
	// means the file did not exist yet, so it is logged but not fatal.
	if err := os.Remove(targetFile); err != nil {
		log.WarnF("[BasicAppendOverwriteContentToFile] - Error removing file: %s , error is %s", targetFile, err.Error())
	}
	return BasicAppendContentToFile(content, targetFile)
}

View File

@@ -102,25 +102,33 @@ func Execute(em *ExecutionMessage) (bool, []string) {
} else if strings.HasPrefix(em.ExecutionType, "HARBOR") {
// harbor function
if em.FuncContent == nil || len(em.FuncContent) <= 1 {
ok = false
resultLog = []string{
"[Harbor Execute] - functions args is wrong!",
}
//if em.FuncContent == nil || len(em.FuncContent) <= 1 {
// ok = false
// resultLog = []string{
// "[Harbor Execute] - functions args is wrong!",
// }
//}
//// Harbor Execute
//ok, resultLog = HarborOperatorCache.Exec(em.FuncContent[0], em.FuncContent[1:]...)
ok = true
resultLog = []string{
"null function!",
}
// Harbor Execute
ok, resultLog = HarborOperatorCache.Exec(em.FuncContent[0], em.FuncContent[1:]...)
} else if strings.HasPrefix(em.ExecutionType, "IMAGE") {
// image function
if em.FuncContent == nil || len(em.FuncContent) <= 1 {
ok = false
resultLog = []string{
"[Harbor Execute] - functions args is wrong!",
}
//if em.FuncContent == nil || len(em.FuncContent) <= 1 {
// ok = false
// resultLog = []string{
// "[Harbor Execute] - functions args is wrong!",
// }
//}
//// Harbor Execute
//ok, resultLog = AgentOsOperatorCache.Sync(em.FuncContent[0], em.FuncContent[1:]...)
ok = true
resultLog = []string{
"null function!",
}
// Harbor Execute
ok, resultLog = AgentOsOperatorCache.Sync(em.FuncContent[0], em.FuncContent[1:]...)
} else {
// deprecated
// shell command

View File

@@ -1,413 +1,414 @@
package a_executor
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"github.com/mittwald/goharbor-client/v5/apiv2"
"github.com/mittwald/goharbor-client/v5/apiv2/model"
)
// HarborOperator holds connection settings and cached REST clients for a
// source and a target Harbor registry.
type HarborOperator struct {
	SourceHarborHost string `json:"sourceHarborHost,omitempty"` // source Harbor API base URL (set by CheckAndBuildHarborClient)
	TargetHarborHost string `json:"targetHarborHost,omitempty"` // target Harbor API base URL (set by CheckAndBuildHarborClient)
	HarborPort string `json:"harborPort,omitempty"` // default API port used when the caller supplies none
	HarborAdminUser string `json:"harborAdminUser,omitempty"` // admin account used for both registries
	HarborAdminPass string `json:"harborAdminPass,omitempty"` // admin password used for both registries
	TargetHarborClient *apiv2.RESTClient // lazily built client for the target Harbor
	SourceHarborClient *apiv2.RESTClient // lazily built client for the source Harbor
}
// NewHarborOperator 返回一个带有默认HarborAdminPass的HarborOperator实例
func NewHarborOperator() *HarborOperator {
return &HarborOperator{
HarborPort: "8033",
HarborAdminUser: "admin", // 设置默认值
HarborAdminPass: "V2ryStr@ngPss", // 设置默认值
}
}
// HarborOperatorCache 饿汉式单例
var HarborOperatorCache = NewHarborOperator()
var OctopusReplicationPolicyName = "octopus-sync-replication"
// Exec dispatches a harbor operation by name (see HarborFunctionEnum).
// An unknown baseFuncName falls through and reports success with a nil log.
func (hOp *HarborOperator) Exec(baseFuncName string, funcArgs ...string) (bool, []string) {
	resultOk := true
	var resultLog []string
	// Go switch cases break implicitly; the redundant break statements were removed.
	switch baseFuncName {
	case "CREATE_PROJECT":
		resultOk, resultLog = hOp.CreateProjectExec(funcArgs)
	case "LIST_PROJECT":
		resultOk, resultLog = hOp.ListProjectExec(funcArgs)
	case "SYNC_PROJECT_BETWEEN_HARBOR":
		resultOk, resultLog = hOp.SyncProjectExec(funcArgs)
	case "SYNC_STATUS_HARBOR":
		resultOk, resultLog = hOp.SyncStatusExec(funcArgs)
	}
	return resultOk, resultLog
}
// CreateProjectExec ensures the default projects ("cmii", "rancher") exist on
// the target Harbor as public projects, creating any that are missing.
// funcArgs layout: [0] target harbor host.
func (hOp *HarborOperator) CreateProjectExec(funcArgs []string) (bool, []string) {
	// Build the target client lazily on first use.
	if hOp.TargetHarborClient == nil {
		ok, createClient := hOp.CheckAndBuildHarborClient(funcArgs[0], "", true)
		if !ok {
			return false, []string{
				"[Harbor Create Project] - Error !",
			}
		}
		hOp.TargetHarborClient = createClient
	}
	client := hOp.TargetHarborClient
	log.Debug("[Harbor Create Project] - create project !")
	needToCreateProjectNameList := []string{"cmii", "rancher"}
	ctx := context.Background()
	for _, projectName := range needToCreateProjectNameList {
		// BUGFIX: "already exists" used to be logged unconditionally, even for
		// projects that were just created; it now only logs when skipping.
		exists, _ := client.ProjectExists(ctx, projectName)
		if exists {
			log.DebugF("[Harbor Create Project] - Project %s already exists ! continue ", projectName)
			continue
		}
		log.DebugF("start to create proect => %s", projectName)
		projectReq := &model.ProjectReq{
			ProjectName: projectName, // project (repository) name
			Metadata: &model.ProjectMetadata{
				Public: "true", // make the project public
			},
		}
		err := client.NewProject(ctx, projectReq)
		if err != nil {
			errorLog := fmt.Sprintf("Error creating project %s: %s\n", projectName, err.Error())
			return false, []string{errorLog}
		}
	}
	successLog := "[Harbor CreateProjectExec] - Project Create Success !"
	log.Info(successLog)
	return true, []string{successLog}
}
// CheckAndBuildHarborClient builds a Harbor v2 REST client for the given host,
// falling back to the operator's default port when targetHarborPort is empty.
// isTarget selects whether TargetHarborHost or SourceHarborHost is updated.
// Returns (false, nil) when the client cannot be created.
// TODO(review): refresh — the harbor client does not always have a port.
func (hOp *HarborOperator) CheckAndBuildHarborClient(targetHarborHost string, targetHarborPort string, isTarget bool) (bool, *apiv2.RESTClient) {
	log.InfoF("[Harbor Client Create] - start to create harbor client %s", targetHarborHost)
	if targetHarborPort == "" {
		targetHarborPort = hOp.HarborPort
	}
	// Record the API base URL on the matching side of the operator.
	apiURL := "http://" + targetHarborHost + ":" + targetHarborPort + "/api/"
	var hostForClient string
	if isTarget {
		hOp.TargetHarborHost = apiURL
		hostForClient = hOp.TargetHarborHost
		log.DebugF("[Harbor Client Create] - harbor host is => %s", hOp.TargetHarborHost)
	} else {
		hOp.SourceHarborHost = apiURL
		hostForClient = hOp.SourceHarborHost
		log.DebugF("[Harbor Client Create] - harbor host is => %s", hOp.SourceHarborHost)
	}
	// Build the client; a failure here means the connection settings are unusable.
	client, err := apiv2.NewRESTClientForHost(hostForClient, hOp.HarborAdminUser, hOp.HarborAdminPass, nil)
	if err != nil {
		errorLog := fmt.Sprintf("Error creating REST client: %s\n", err.Error())
		log.Error(errorLog)
		return false, nil
	}
	return true, client
}
// ListProjectExec lists all projects on the target Harbor and returns them as
// a JSON string in the result log.
// funcArgs layout: [0] target harbor host.
func (hOp *HarborOperator) ListProjectExec(funcArgs []string) (bool, []string) {
	// Build the target client lazily on first use.
	if hOp.TargetHarborClient == nil {
		ok, createClient := hOp.CheckAndBuildHarborClient(funcArgs[0], "", true)
		if !ok {
			return false, []string{
				"[Harbor Create Project ] - Error !",
			}
		}
		hOp.TargetHarborClient = createClient
	}
	client := hOp.TargetHarborClient
	ctx := context.Background()
	projects, err := client.ListProjects(ctx, "")
	if err != nil {
		// BUGFIX: this used to call os.Exit(1), terminating the entire agent
		// process from library code; report the failure to the caller instead.
		errorLog := fmt.Sprintf("Error listing projects: %v", err)
		log.Error(errorLog)
		return false, []string{errorLog}
	}
	// Print each project for local diagnostics.
	for _, project := range projects {
		fmt.Printf("Project ID: %d, Name: %s, Public: %v\n", project.ProjectID, project.Name, project.Metadata.Public)
	}
	marshal, _ := json.Marshal(projects)
	return true, []string{
		fmt.Sprintf("List Projects of %s ", hOp.TargetHarborHost),
		string(marshal),
	}
}
// SyncProjectExec replicates one project from a source Harbor into the target
// Harbor. It (re)registers the source as a registry endpoint on the target,
// recreates a manual replication policy filtered to the project, and triggers
// one replication execution.
// funcArgs layout: [0] target harbor host, [1] source harbor "host[:port]",
// [2] project name to synchronize.
func (hOp *HarborOperator) SyncProjectExec(funcArgs []string) (bool, []string) {
	// Build the target client lazily on first use.
	if hOp.TargetHarborClient == nil {
		ok, createClient := hOp.CheckAndBuildHarborClient(funcArgs[0], "", true)
		if !ok {
			return false, []string{
				"[Harbor Sync Project ] - Error !",
			}
		}
		hOp.TargetHarborClient = createClient
	}
	targetClient := hOp.TargetHarborClient
	// Build the source client lazily; funcArgs[1] may carry an explicit port.
	if hOp.SourceHarborClient == nil {
		realHost := funcArgs[1]
		realPort := ""
		if strings.Contains(funcArgs[1], ":") {
			split := strings.Split(funcArgs[1], ":")
			realHost = split[0]
			realPort = split[1]
		}
		ok, createClient := hOp.CheckAndBuildHarborClient(realHost, realPort, false)
		if !ok {
			return false, []string{
				"[Harbor Sync Project ] - Error !",
			}
		}
		hOp.SourceHarborClient = createClient
	}
	needToSynchronizedProject := funcArgs[2]
	log.InfoF("[Harbor Sync Project ] - start to sync harbor project => %s", needToSynchronizedProject)
	log.DebugF("[Harbor Sync Project ] - start to check projects all exists!")
	ctx := context.Background()
	// Check both source and target harbor projects exist.
	// NOTE(review): despite the intent above, only the target client is
	// actually queried here — confirm whether the source check was dropped
	// deliberately.
	needToCreateProjectNameList := []string{"rancher", "cmii"}
	for _, projectName := range needToCreateProjectNameList {
		syncNotExistHarborProjectError := []string{
			"[Harbor Sync Project ] - project not exists !",
		}
		exists, _ := targetClient.ProjectExists(ctx, projectName)
		if !exists {
			return false, append(syncNotExistHarborProjectError, "targetClient")
		}
	}
	OctopusSourceHarborName := "octopus-source"
	// Register the source harbor as a registry endpoint on the target harbor.
	log.InfoF("[Harbor Sync Project ] - start to create source harbor endpoints => %s", hOp.SourceHarborHost)
	// Delete any replication policy left over from a previous run; it must go
	// before the registry endpoint it references can be deleted.
	log.Debug("[Harbor Sync Project ] - start to delete exist replication policy !")
	policies, _ := targetClient.ListReplicationPolicies(ctx)
	if policies != nil {
		for _, policy := range policies {
			if policy.Name == OctopusReplicationPolicyName {
				err := targetClient.DeleteReplicationPolicyByID(ctx, policy.ID)
				if err != nil {
					log.ErrorF("[Harbor Sync Project ] - delete exists replication policy failed ! => %v ", policy)
				}
			}
		}
	}
	// Delete a stale endpoint with the same name, if any.
	log.Debug("[Harbor Sync Project ] - start to delete exist exist harbor endpoints !")
	exitRegistry, _ := targetClient.GetRegistryByName(ctx, OctopusSourceHarborName)
	if exitRegistry != nil {
		log.Debug("[Harbor Sync Project ] - source endpoints already exists ! delete it !")
		err := targetClient.DeleteRegistryByID(ctx, exitRegistry.ID)
		if err != nil {
			log.ErrorF("[Harbor Sync Project ] - source endpoints delete failed ! => %v ", exitRegistry)
		}
	}
	// TODO: cqga failed
	// NOTE(review): endpoint credentials are hard-coded here — consider moving
	// them to configuration.
	octopusSourceRegistry := &model.Registry{
		Credential: &model.RegistryCredential{
			AccessKey:    "admin",
			AccessSecret: "V2ryStr@ngPss",
			Type:         "basic",
		},
		Insecure: true,
		Name:     OctopusSourceHarborName, // endpoint name on the target harbor
		Type:     "harbor",
		URL:      strings.Split(hOp.SourceHarborHost, "/api")[0], // strip the "/api/" suffix back to the bare base URL
	}
	err := targetClient.NewRegistry(ctx, octopusSourceRegistry)
	if err != nil {
		sprintf := fmt.Sprintf("[SyncProjectExec] - source endpoints create failed ! => %s", err.Error())
		log.Error(sprintf)
		return false, []string{sprintf}
	}
	// Re-fetch the endpoint to learn the ID Harbor assigned to it.
	realOctopusSourceRegistry, err := targetClient.GetRegistryByName(ctx, OctopusSourceHarborName)
	if err != nil {
		sprintf := fmt.Sprintf("[SyncProjectExec] - get target registry id failed ! => %s", err.Error())
		log.Error(sprintf)
		return false, []string{sprintf}
	}
	// Build the replication policy.
	octopusReplicationPolicy := &model.ReplicationPolicy{
		CopyByChunk:               nil,
		Deletion:                  false,
		Description:               "",
		DestNamespace:             "", // empty keeps the source project name on the target
		DestNamespaceReplaceCount: nil,
		SrcRegistry: &model.Registry{
			ID: realOctopusSourceRegistry.ID,
		},
		DestRegistry: &model.Registry{
			ID: 0, // 0 denotes the local (target) harbor itself
		},
		Enabled: true,
		Filters: []*model.ReplicationFilter{
			{
				Type:  "name",
				Value: needToSynchronizedProject + "/**", // replicate everything under the project
			},
		},
		ID:                0,
		Name:              OctopusReplicationPolicyName,
		Override:          true,
		ReplicateDeletion: false,
		Speed:             nil,
		Trigger: &model.ReplicationTrigger{
			Type: "manual", // may be "manual", "scheduled", or "event_based"
			// a "scheduled" trigger additionally needs a cron expression, e.g.
			// TriggerSettings: &model.TriggerSettings{Cron: "0 * * * *"},
		},
	}
	// Create the replication policy on the target harbor.
	log.InfoF("[Harbor Sync Project ] - Start To Sync Project => %s !", needToSynchronizedProject)
	err = targetClient.NewReplicationPolicy(ctx, octopusReplicationPolicy.DestRegistry, octopusReplicationPolicy.SrcRegistry, octopusReplicationPolicy.Deletion, octopusReplicationPolicy.Override, octopusReplicationPolicy.Enabled, octopusReplicationPolicy.Filters, octopusReplicationPolicy.Trigger, octopusReplicationPolicy.DestNamespace, octopusReplicationPolicy.Name, octopusReplicationPolicy.Name)
	if err != nil {
		syncErrorMessage := fmt.Sprintf("[Harbor Sync Project ] - Sync Project [ %s ] Failed ! Error is => %s\n", needToSynchronizedProject, err.Error())
		log.Error(syncErrorMessage)
		return false, []string{
			syncErrorMessage,
		}
	}
	// Re-fetch the policy to learn its assigned ID, then trigger one execution.
	realOctopusReplicationPolicy, err := targetClient.GetReplicationPolicyByName(ctx, OctopusReplicationPolicyName)
	if err != nil {
		return false, []string{
			"[Harbor Sync Project ] - failed to get the realOctopusReplicationPolicy!",
			err.Error(),
		}
	}
	err = targetClient.TriggerReplicationExecution(ctx, &model.StartReplicationExecution{
		PolicyID: realOctopusReplicationPolicy.ID,
	})
	if err != nil {
		return false, []string{
			"[ Harbor Sync Project ] - failed to start the harbor sync execution !",
			err.Error(),
		}
	}
	return true, []string{
		fmt.Sprintf("[ Harbor Sync Project ] - sync project [ %s ] started !", needToSynchronizedProject),
	}
}
// SyncStatusExec reports whether the octopus replication policy's executions
// have all succeeded. Returns false with a progress percentage while any
// execution is still incomplete.
// funcArgs layout: [0] target harbor host.
func (hOp *HarborOperator) SyncStatusExec(funcArgs []string) (bool, []string) {
	// Build the target client lazily on first use.
	if hOp.TargetHarborClient == nil {
		ok, createClient := hOp.CheckAndBuildHarborClient(funcArgs[0], "", true)
		if !ok {
			return false, []string{
				"[ Sync Status ] - Error !",
			}
		}
		hOp.TargetHarborClient = createClient
	}
	targetClient := hOp.TargetHarborClient
	ctx := context.Background()
	// Check the replication policy exists.
	replicationPolicy, err := targetClient.GetReplicationPolicyByName(ctx, OctopusReplicationPolicyName)
	if err != nil {
		return false, []string{
			"[ Sync Status ] - get replication error !",
			err.Error(),
		}
	}
	// List execution status for the policy.
	replicationExecutions, err := targetClient.ListReplicationExecutions(ctx, &replicationPolicy.ID, nil, nil)
	if err != nil {
		return false, []string{
			"[ Sync Status ] - replication has no sync work !",
			err.Error(),
		}
	}
	// Find the newest one; only have one here.
	for _, execution := range replicationExecutions {
		if !strings.HasPrefix(execution.Status, "Succeed") {
			bytes, _ := json.Marshal(execution)
			log.InfoF("[sync status]- status are => %v", string(bytes))
			// BUGFIX: guard against Total == 0, which previously produced a
			// NaN progress percentage.
			progress := 0.0
			if execution.Total > 0 {
				progress = float64(execution.Succeed) / float64(execution.Total) * 100
			}
			return false, []string{
				fmt.Sprintf("[sync status] - not complete ! progress is => %s %%",
					strconv.FormatFloat(progress, 'f', 2, 64)),
			}
		}
	}
	return true, []string{
		"[sync status]- sync completed !",
	}
}
// Command has no command-line form for harbor operations; it always returns nil.
func (hOp *HarborOperator) Command(baseFuncName string, funcArgs ...string) []string {
	return nil
}
//
//import (
// "context"
// "encoding/json"
// "fmt"
// "os"
// "strconv"
// "strings"
//
// "github.com/mittwald/goharbor-client/v5/apiv2"
// "github.com/mittwald/goharbor-client/v5/apiv2/model"
//)
//
//type HarborOperator struct {
// SourceHarborHost string `json:"sourceHarborHost,omitempty"`
//
// TargetHarborHost string `json:"targetHarborHost,omitempty"`
//
// HarborPort string `json:"harborPort,omitempty"`
//
// HarborAdminUser string `json:"harborAdminUser,omitempty"`
//
// HarborAdminPass string `json:"harborAdminPass,omitempty"`
//
// TargetHarborClient *apiv2.RESTClient
//
// SourceHarborClient *apiv2.RESTClient
//}
//
//// NewHarborOperator 返回一个带有默认HarborAdminPass的HarborOperator实例
//func NewHarborOperator() *HarborOperator {
// return &HarborOperator{
// HarborPort: "8033",
// HarborAdminUser: "admin", // 设置默认值
// HarborAdminPass: "V2ryStr@ngPss", // 设置默认值
// }
//}
//
//// HarborOperatorCache 饿汉式单例
//var HarborOperatorCache = NewHarborOperator()
//var OctopusReplicationPolicyName = "octopus-sync-replication"
//
//func (hOp *HarborOperator) Exec(baseFuncName string, funcArgs ...string) (bool, []string) {
// // 参见 HarborFunctionEnum
//
// resultOk := true
// var resultLog []string
//
// switch baseFuncName {
// case "CREATE_PROJECT":
// resultOk, resultLog = hOp.CreateProjectExec(funcArgs)
// break
// case "LIST_PROJECT":
// resultOk, resultLog = hOp.ListProjectExec(funcArgs)
// break
// case "SYNC_PROJECT_BETWEEN_HARBOR":
// resultOk, resultLog = hOp.SyncProjectExec(funcArgs)
// break
// case "SYNC_STATUS_HARBOR":
// resultOk, resultLog = hOp.SyncStatusExec(funcArgs)
// break
// }
//
// return resultOk, resultLog
//}
//
//func (hOp *HarborOperator) CreateProjectExec(funcArgs []string) (bool, []string) {
//
// if hOp.TargetHarborClient == nil {
//
// ok, createClient := hOp.CheckAndBuildHarborClient(funcArgs[0], "", true)
// if !ok {
// return false, []string{
// "[Harbor Create Project] - Error !",
// }
// }
// hOp.TargetHarborClient = createClient
// }
// client := hOp.TargetHarborClient
//
// // create project
// // 定义你想要创建的仓库(项目)的详细信息
//
// log.Debug("[Harbor Create Project] - create project !")
// needToCreateProjectNameList := []string{"cmii", "rancher"}
// // 使用客户端创建项目
// ctx := context.Background()
// for _, projectName := range needToCreateProjectNameList {
//
// log.DebugF("start to create proect => %s", projectName)
// projectReq := &model.ProjectReq{
// ProjectName: projectName, // 仓库名称
// Metadata: &model.ProjectMetadata{
// Public: "true", // 是否是公开的
// },
// }
//
// exists, _ := client.ProjectExists(ctx, projectName)
// if !exists {
//
// err := client.NewProject(ctx, projectReq)
// if err != nil {
// errorLog := fmt.Sprintf("Error creating project %s: %s\n", projectName, err.Error())
// return false, []string{errorLog}
// }
// }
//
// log.DebugF("[Harbor Create Project] - Project %s already exists ! continue ", projectName)
//
// }
//
// successLog := "[Harbor CreateProjectExec] - Project Create Success !"
// log.Info(successLog)
//
// return true, []string{successLog}
//}
//
//// todo refresh HarborClient不一定有端口
//func (hOp *HarborOperator) CheckAndBuildHarborClient(targetHarborHost string, targetHarborPort string, isTarget bool) (bool, *apiv2.RESTClient) {
//
// log.InfoF("[Harbor Client Create] - start to create harbor client %s", targetHarborHost)
//
// if targetHarborPort == "" {
// targetHarborPort = hOp.HarborPort
// }
//
// if isTarget {
// hOp.TargetHarborHost = "http://" + targetHarborHost + ":" + targetHarborPort + "/api/"
// log.DebugF("[Harbor Client Create] - harbor host is => %s", hOp.TargetHarborHost)
// } else {
// hOp.SourceHarborHost = "http://" + targetHarborHost + ":" + targetHarborPort + "/api/"
// log.DebugF("[Harbor Client Create] - harbor host is => %s", hOp.SourceHarborHost)
// }
//
// // check connection
// var client *apiv2.RESTClient
// var err error
// if isTarget {
// client, err = apiv2.NewRESTClientForHost(hOp.TargetHarborHost, hOp.HarborAdminUser, hOp.HarborAdminPass, nil)
// } else {
// client, err = apiv2.NewRESTClientForHost(hOp.SourceHarborHost, hOp.HarborAdminUser, hOp.HarborAdminPass, nil)
// }
// if err != nil {
// errorLog := fmt.Sprintf("Error creating REST client: %s\n", err.Error())
// log.Error(errorLog)
// return false, nil
// }
//
// return true, client
//}
//
//func (hOp *HarborOperator) ListProjectExec(funcArgs []string) (bool, []string) {
//
// if hOp.TargetHarborClient == nil {
// ok, createClient := hOp.CheckAndBuildHarborClient(funcArgs[0], "", true)
// if !ok {
// return false, []string{
// "[Harbor Create Project ] - Error !",
// }
// }
// hOp.TargetHarborClient = createClient
// }
// client := hOp.TargetHarborClient
//
// // 使用客户端列出所有项目
// ctx := context.Background()
// projects, err := client.ListProjects(ctx, "")
// if err != nil {
// fmt.Printf("Error listing projects: %v\n", err)
// os.Exit(1)
// }
//
// // 打印所有项目的信息
// for _, project := range projects {
// fmt.Printf("Project ID: %d, Name: %s, Public: %v\n", project.ProjectID, project.Name, project.Metadata.Public)
// }
//
// marshal, _ := json.Marshal(projects)
//
// return true, []string{
// fmt.Sprintf("List Projects of %s ", hOp.TargetHarborHost),
// string(marshal),
// }
//}
//
//func (hOp *HarborOperator) SyncProjectExec(funcArgs []string) (bool, []string) {
//
// if hOp.TargetHarborClient == nil {
// ok, createClient := hOp.CheckAndBuildHarborClient(funcArgs[0], "", true)
// if !ok {
// return false, []string{
// "[Harbor Sync Project ] - Error !",
// }
// }
// hOp.TargetHarborClient = createClient
// }
// targetClient := hOp.TargetHarborClient
//
// if hOp.SourceHarborClient == nil {
// realHost := funcArgs[1]
// realPort := ""
// if strings.Contains(funcArgs[1], ":") {
// split := strings.Split(funcArgs[1], ":")
// realHost = split[0]
// realPort = split[1]
// }
// ok, createClient := hOp.CheckAndBuildHarborClient(realHost, realPort, false)
// if !ok {
// return false, []string{
// "[Harbor Sync Project ] - Error !",
// }
// }
// hOp.SourceHarborClient = createClient
// }
//
// needToSynchronizedProject := funcArgs[2]
// log.InfoF("[Harbor Sync Project ] - start to sync harbor project => %s", needToSynchronizedProject)
//
// log.DebugF("[Harbor Sync Project ] - start to check projects all exists!")
// ctx := context.Background()
//
// // check both source and target harbor project exists
// needToCreateProjectNameList := []string{"rancher", "cmii"}
//
// for _, projectName := range needToCreateProjectNameList {
// syncNotExistHarborProjectError := []string{
// "[Harbor Sync Project ] - project not exists !",
// }
// exists, _ := targetClient.ProjectExists(ctx, projectName)
// if !exists {
// return false, append(syncNotExistHarborProjectError, "targetClient")
// }
// }
//
// OctopusSourceHarborName := "octopus-source"
//
// // add source registry to destination harbor
// log.InfoF("[Harbor Sync Project ] - start to create source harbor endpoints => %s", hOp.SourceHarborHost)
//
// log.Debug("[Harbor Sync Project ] - start to delete exist replication policy !")
// policies, _ := targetClient.ListReplicationPolicies(ctx)
// if policies != nil {
// for _, policy := range policies {
// if policy.Name == OctopusReplicationPolicyName {
// err := targetClient.DeleteReplicationPolicyByID(ctx, policy.ID)
// if err != nil {
// log.ErrorF("[Harbor Sync Project ] - delete exists replication policy failed ! => %v ", policy)
// }
// }
// }
// }
//
// log.Debug("[Harbor Sync Project ] - start to delete exist exist harbor endpoints !")
// exitRegistry, _ := targetClient.GetRegistryByName(ctx, OctopusSourceHarborName)
// if exitRegistry != nil {
// log.Debug("[Harbor Sync Project ] - source endpoints already exists ! delete it !")
// err := targetClient.DeleteRegistryByID(ctx, exitRegistry.ID)
// if err != nil {
// log.ErrorF("[Harbor Sync Project ] - source endpoints delete failed ! => %v ", exitRegistry)
// }
// }
//
// // todo cqga failed
// octopusSourceRegistry := &model.Registry{
// Credential: &model.RegistryCredential{
// AccessKey: "admin",
// AccessSecret: "V2ryStr@ngPss",
// Type: "basic",
// },
// Insecure: true,
// Name: OctopusSourceHarborName, // 源 Harbor 实例的注册表 ID通常为 0
// Type: "harbor",
// URL: strings.Split(hOp.SourceHarborHost, "/api")[0],
// }
// err := targetClient.NewRegistry(ctx, octopusSourceRegistry)
// if err != nil {
// sprintf := fmt.Sprintf("[SyncProjectExec] - source endpoints create failed ! => %s", err.Error())
// log.Error(sprintf)
// return false, []string{sprintf}
// }
//
// // get the real one for it's ID
// realOctopusSourceRegistry, err := targetClient.GetRegistryByName(ctx, OctopusSourceHarborName)
// if err != nil {
// sprintf := fmt.Sprintf("[SyncProjectExec] - get target registry id failed ! => %s", err.Error())
// log.Error(sprintf)
// return false, []string{sprintf}
// }
//
// // 创建复制策略
// octopusReplicationPolicy := &model.ReplicationPolicy{
// CopyByChunk: nil,
// Deletion: false,
// Description: "",
// DestNamespace: "", // 可以指定目标 Harbor 中的特定项目,如果为空,则使用源项目名称
// DestNamespaceReplaceCount: nil,
// SrcRegistry: &model.Registry{
// ID: realOctopusSourceRegistry.ID,
// },
// DestRegistry: &model.Registry{
// ID: 0,
// },
// Enabled: true,
// Filters: []*model.ReplicationFilter{
// {
// Type: "name",
// Value: needToSynchronizedProject + "/**",
// },
// },
// ID: 0,
// Name: OctopusReplicationPolicyName,
// Override: true,
// ReplicateDeletion: false,
// Speed: nil,
// Trigger: &model.ReplicationTrigger{
// Type: "manual", // 可以是 "manual", "scheduled", 或 "event_based"
// // 如果是 "scheduled",还需要设置 Cron 表达式
// // TriggerSettings: &model.TriggerSettings{Cron: "0 * * * *"},
// },
// }
//
// // 在源 Harbor 中创建复制策略
// log.InfoF("[Harbor Sync Project ] - Start To Sync Project => %s !", needToSynchronizedProject)
//
// err = targetClient.NewReplicationPolicy(ctx, octopusReplicationPolicy.DestRegistry, octopusReplicationPolicy.SrcRegistry, octopusReplicationPolicy.Deletion, octopusReplicationPolicy.Override, octopusReplicationPolicy.Enabled, octopusReplicationPolicy.Filters, octopusReplicationPolicy.Trigger, octopusReplicationPolicy.DestNamespace, octopusReplicationPolicy.Name, octopusReplicationPolicy.Name)
//
// if err != nil {
// syncErrorMessage := fmt.Sprintf("[Harbor Sync Project ] - Sync Project [ %s ] Failed ! Error is => %s\n", needToSynchronizedProject, err.Error())
// log.Error(syncErrorMessage)
// return false, []string{
// syncErrorMessage,
// }
// }
//
// realOctopusReplicationPolicy, err := targetClient.GetReplicationPolicyByName(ctx, OctopusReplicationPolicyName)
// if err != nil {
// return false, []string{
// "[Harbor Sync Project ] - failed to get the realOctopusReplicationPolicy!",
// err.Error(),
// }
// }
//
// err = targetClient.TriggerReplicationExecution(ctx, &model.StartReplicationExecution{
// PolicyID: realOctopusReplicationPolicy.ID,
// })
// if err != nil {
// return false, []string{
// "[ Harbor Sync Project ] - failed to start the harbor sync execution !",
// err.Error(),
// }
// }
//
// return true, []string{
// fmt.Sprintf("[ Harbor Sync Project ] - sync project [ %s ] started !", needToSynchronizedProject),
// }
//}
//
//func (hOp *HarborOperator) SyncStatusExec(funcArgs []string) (bool, []string) {
//
// if hOp.TargetHarborClient == nil {
// ok, createClient := hOp.CheckAndBuildHarborClient(funcArgs[0], "", true)
// if !ok {
// return false, []string{
// "[ Sync Status ] - Error !",
// }
// }
// hOp.TargetHarborClient = createClient
// }
// targetClient := hOp.TargetHarborClient
//
// ctx := context.Background()
// // check replication policy exists
//
// replicationPolicy, err := targetClient.GetReplicationPolicyByName(ctx, OctopusReplicationPolicyName)
// if err != nil {
// return false, []string{
// "[ Sync Status ] - get replication error !",
// err.Error(),
// }
// }
//
// // list execution status
// replicationExecutions, err := targetClient.ListReplicationExecutions(ctx, &replicationPolicy.ID, nil, nil)
// if err != nil {
// return false, []string{
// "[ Sync Status ] - replication has no sync work !",
// err.Error(),
// }
// }
//
// // find the newest one only have one here
// for _, execution := range replicationExecutions {
// if !strings.HasPrefix(execution.Status, "Succeed") {
// bytes, _ := json.Marshal(execution)
// log.InfoF("[sync status]- status are => %v", string(bytes))
// // report status
// return false, []string{
// fmt.Sprintf("[sync status] - not complete ! progress is => %s %%",
// strconv.FormatFloat(float64(execution.Succeed)/float64(execution.Total)*100, 'f', 2, 64)),
// }
// }
// }
//
// return true, []string{
// "[sync status]- sync completed !",
// }
//}
//
//func (hOp *HarborOperator) Command(baseFuncName string, funcArgs ...string) []string {
//
// return nil
//}

View File

@@ -1,346 +1,340 @@
package a_executor
import (
"fmt"
"strings"
"wdd.io/agent-common/image"
)
var LocalGzipImageFolderPrefix = "/var/lib/docker/image_sync/"
var DefaultSocks5ProxyUser = "zeaslity"
var DefaultSocks5ProxyPass = "password"
// Sync dispatches an image-sync operation by name.
// Unknown names fall through to okExec.
func (op *AgentOsOperator) Sync(baseFuncName string, funcArgs ...string) (bool, []string) {
	resultOk := false
	var errorLog []string
	// Go switch cases break implicitly; the redundant break statements were removed.
	switch baseFuncName {
	case "DOWNLOAD_DOCKER_IMAGE":
		resultOk, errorLog = op.downloadDockerImage(funcArgs)
	case "COMPRESS_IMAGE_TO_GZIP":
		resultOk, errorLog = op.compressImageToGzip(funcArgs)
	case "UPLOAD_GZIP_TO_OSS":
		resultOk, errorLog = op.uploadGzipFileToOss(funcArgs)
	case "DOWNLOAD_GZIP_IMAGE_FILE":
		resultOk, errorLog = op.downloadGzipImageFile(funcArgs)
	case "LOAD_DOCKER_IMAGE_FROM_GZIP":
		resultOk, errorLog = op.loadDockerImageFromGzip(funcArgs)
	case "PUSH_IMAGE_TO_TARGET_HARBOR":
		resultOk, errorLog = op.pushImageToTargetHarbor(funcArgs)
	case "UPDATE_IMAGE_TAG":
		resultOk, errorLog = op.updateImageTag(funcArgs)
	default:
		resultOk, errorLog = op.okExec(funcArgs)
	}
	return resultOk, errorLog
}
// downloadDockerImage pulls a docker image locally, logging in first when the
// image comes from the cmii harbor.
// funcArgs are imageFullName gzipFolderPrefix gzipFileName ossUrlPrefix namespace newImageTag
func (op *AgentOsOperator) downloadDockerImage(funcArgs []string) (bool, []string) {
	if !BasicCommandExistByPath("docker") {
		return false, []string{
			"docker not exits !",
		}
	}
	imageFullName := funcArgs[0]
	log.InfoF("[downloadDockerImage]- start to pull docker image %s", imageFullName)
	// login
	// SECURITY NOTE(review): registry credentials are hard-coded and passed on
	// the command line (visible in process lists); move them to configuration
	// or secret storage.
	if strings.HasPrefix(imageFullName, image.CmiiHarborPrefix) {
		HardCodeCommandExecutor("docker login -u rad02_drone -p Drone@1234 harbor.cdcyy.com.cn")
	}
	if !PureResultSingleExecute([]string{
		"docker",
		"pull",
		imageFullName,
	}) {
		return false, []string{
			"docker pull failed of " + imageFullName,
		}
	}
	// Consistency fix: use imageFullName throughout (was funcArgs[0] here).
	if !BasicDockerImageExistByFullName(imageFullName) {
		return false, []string{
			"image not exits ! unknown error happened!",
		}
	}
	return true, []string{
		imageFullName,
	}
}
// compressImageToGzip saves a local docker image and pipes it through gzip
// into the given output folder, verifying the resulting archive is non-empty.
// funcArgs layout: [0] image full name, [1] gzip output folder.
func (op *AgentOsOperator) compressImageToGzip(funcArgs []string) (bool, []string) {
	// Both docker and gzip must be on PATH.
	for _, tool := range []string{"docker", "gzip"} {
		if !BasicCommandExistByPath(tool) {
			return false, []string{tool + " not exits !"}
		}
	}
	gzipFolderPrefix := funcArgs[1]
	if !BasicFolderExists(gzipFolderPrefix) {
		BasicCreateFolder(gzipFolderPrefix)
	}
	imageFullName := funcArgs[0]
	if !BasicDockerImageExistByFullName(imageFullName) {
		return false, []string{"image not exits !"}
	}
	if !strings.HasSuffix(gzipFolderPrefix, "/") {
		gzipFolderPrefix += "/"
	}
	// Derive the archive name from the image name and run the save|gzip pipe.
	gzipFileName := image.ImageFullNameToGzipFileName(imageFullName)
	saveCommand := "docker save " + imageFullName + " | gzip > " + gzipFolderPrefix + gzipFileName
	saved, saveLog := HardCodeCommandExecutor(saveCommand)
	if !saved {
		return false, saveLog
	}
	// An empty archive means the pipe silently failed.
	if !BasicFileExistAndNotNull(gzipFolderPrefix + gzipFileName) {
		return false, []string{"gzip of ile error "}
	}
	return true, []string{gzipFileName}
}
// uploadGzipFileToOss uploads a local gzip archive to the demo OSS bucket via
// the mc client, then verifies the object is listed remotely.
// funcArgs layout: [1] local gzip folder, [2] gzip file name.
func (op *AgentOsOperator) uploadGzipFileToOss(funcArgs []string) (bool, []string) {
	if !BasicCommandExistByPath("mc") {
		return false, []string{
			"mc not exits!",
		}
	}
	gzipFolderPrefix := funcArgs[1]
	gzipImageFromFullName := funcArgs[2]
	// SECURITY NOTE(review): OSS credentials are hard-coded on the command line;
	// move them to configuration or secret storage.
	ok, resultLog := HardCodeCommandExecutor("mc --insecure alias set demo https://oss.ig-demo.uavcmlc.com cmii B#923fC7mk")
	if !ok {
		// BUGFIX: this result used to be silently discarded; the copy below
		// cannot succeed without a working alias, so fail fast with its log.
		return false, resultLog
	}
	// Best-effort removal of a stale object with the same name; failure ignored.
	PureResultSingleExecute([]string{
		"mc",
		"rm",
		"demo/cmlc-installation/tmp/" + gzipImageFromFullName,
	})
	ok, resultLog = AllCommandExecutor([]string{
		"mc",
		"cp",
		gzipFolderPrefix + gzipImageFromFullName,
		"demo/cmlc-installation/tmp/" + gzipImageFromFullName,
	})
	if !ok {
		return false, resultLog
	}
	// Verify the uploaded object actually shows up in the bucket listing.
	find, _ := BasicFindContentInCommandOutput("mc ls demo/cmlc-installation/tmp/", gzipImageFromFullName)
	if !find {
		return false, []string{
			"demo oss can't find gzip file !",
		}
	}
	return true, []string{
		gzipImageFromFullName,
	}
}
// downloadGzipImageFile downloads a gzip image archive from OSS into
// LocalGzipImageFolderPrefix, optionally through a socks5 proxy.
//
// funcArgs layout: [2] gzip file name, [3] oss url prefix,
// [4] proxy url ("" means direct download inside BasicDownloadFile).
// Returns (true, [localFilePath]) on success, or (false, errorLog).
func (op *AgentOsOperator) downloadGzipImageFile(funcArgs []string) (bool, []string) {
	ossUrlPrefix := funcArgs[3]
	gzipImageFromFullName := funcArgs[2]
	proxyUrl := funcArgs[4]
	// make sure the local cache folder exists
	BasicCreateFolder(LocalGzipImageFolderPrefix)
	// remove any stale copy so the download starts clean
	desFile := LocalGzipImageFolderPrefix + gzipImageFromFullName
	if !BasicRemoveFileOrFolder(desFile) {
		// BUG FIX: message used to claim the file "already exits" — the actual
		// failure is that the pre-existing file could not be removed
		return false, []string{
			"failed to remove existing file " + desFile,
		}
	}
	download, downloadLog := BasicDownloadFile(ossUrlPrefix+gzipImageFromFullName, proxyUrl, DefaultSocks5ProxyUser, DefaultSocks5ProxyPass, desFile)
	if !download {
		return false, downloadLog
	}
	return true, []string{
		desFile,
	}
}
// loadDockerImageFromGzip loads a docker image from a previously downloaded
// gzip archive and verifies the image is present afterwards.
//
// funcArgs layout: [0] expected image full name, [2] gzip file name.
// Returns (true, nil) on success, or (false, errorLog).
func (op *AgentOsOperator) loadDockerImageFromGzip(funcArgs []string) (bool, []string) {
	gzipFile := LocalGzipImageFolderPrefix + funcArgs[2]
	if !BasicFileExistAndNotNull(gzipFile) {
		return false, []string{
			gzipFile,
			"local gzip file not exits!",
		}
	}
	// shell redirection requires the hard-coded command form
	loadCommand := "docker load < " + gzipFile
	if ok, commandLog := HardCodeCommandExecutor(loadCommand); !ok {
		return false, commandLog
	}
	if !BasicDockerImageExistByFullName(funcArgs[0]) {
		return false, []string{
			"docker load from gzip file error ! image not exits!",
			funcArgs[0],
		}
	}
	return true, nil
}
// pushImageToTargetHarbor re-tags a local image for the target harbor and
// pushes it there, logging in to the appropriate registry first.
//
// funcArgs layout: [0] source image full name, [5] target harbor host.
// Returns (true, [targetImageFullName]) on success, or (false, errorLog).
func (op *AgentOsOperator) pushImageToTargetHarbor(funcArgs []string) (bool, []string) {
	targetHarborHost := funcArgs[5]
	imageFullName := funcArgs[0]
	// BUG FIX: strings.Contains("8033") also matched hosts such as
	// 192.168.80.33 and wrongly skipped appending the port; only a real
	// ":8033" suffix means the port is already present
	if !strings.HasSuffix(targetHarborHost, ":8033") {
		targetHarborHost += ":8033"
	}
	targetImageFullName := image.ImageNameToTargetImageFullName(imageFullName, targetHarborHost)
	if !PureResultSingleExecute([]string{
		"docker",
		"tag",
		imageFullName,
		targetImageFullName,
	}) {
		return false, []string{
			"docker tag error!",
		}
	}
	// NOTE(review): registry credentials are hard-coded; login failures are
	// deliberately best-effort — the push below will surface auth errors.
	if strings.HasPrefix(targetImageFullName, image.CmiiHarborPrefix) {
		HardCodeCommandExecutor("docker login -u rad02_drone -p Drone@1234 harbor.cdcyy.com.cn")
	} else {
		HardCodeCommandExecutor("docker login -u admin -p V2ryStr@ngPss " + targetHarborHost)
	}
	ok, resultLog := AllCommandExecutor([]string{
		"docker",
		"push",
		targetImageFullName,
	})
	if !ok {
		return false, resultLog
	}
	return true, []string{
		targetImageFullName,
	}
}
// updateImageTag updates a k8s deployment's image tag via the client-go
// helper K8sDeploymentUpdateTag. Only Cmii-harbor images are accepted.
//
// funcArgs layout: [0] image full name, [6] k8s namespace.
// Returns (true, [resultLog]) on success, or (false, errorLog).
func (op *AgentOsOperator) updateImageTag(funcArgs []string) (bool, []string) {
	namespace := funcArgs[6]
	imageFullName := funcArgs[0]
	if !strings.HasPrefix(imageFullName, image.CmiiHarborPrefix) {
		return false, []string{
			"cant update this image !",
		}
	}
	appName := image.ImageFullNameToAppName(imageFullName)
	newTag := image.ImageFullNameToImageTag(imageFullName)
	sprintf := fmt.Sprintf("start to update [%s] image tag [%s] to [%s]", namespace, appName, newTag)
	log.Info(sprintf)
	update, resultLog := K8sDeploymentUpdateTag(namespace, appName, newTag)
	if !update {
		// BUG FIX: the detailed failure log from K8sDeploymentUpdateTag was
		// previously dropped; include it so the caller can diagnose
		return false, []string{
			sprintf,
			resultLog,
		}
	}
	return true, []string{
		resultLog,
	}
}
// updateImageTagByFile updates a deployment's image tag by downloading and
// executing the update-app-tag.sh helper script (kubectl-based), instead of
// calling the k8s API directly.
//
// funcArgs layout: [0] image full name, [4] proxy url, [6] k8s namespace.
// Returns (true, scriptOutput) on success, or (false, errorLog).
func (op *AgentOsOperator) updateImageTagByFile(funcArgs []string) (bool, []string) {
	namespace := funcArgs[6]
	proxyUrl := funcArgs[4]
	if !BasicCommandExistByPath("kubectl") {
		return false, []string{
			"kubectl not exits !",
		}
	}
	imageFullName := funcArgs[0]
	if !strings.HasPrefix(imageFullName, image.CmiiHarborPrefix) {
		return false, []string{
			"cant update this image !",
		}
	}
	appName := image.ImageFullNameToAppName(imageFullName)
	// 2024-04-07: switched to the exec-file mode (run a downloaded script)
	folderPrefix := "/root/wdd/update/"
	BasicCreateFolder(folderPrefix)
	updateFileName := "update-app-tag.sh"
	if !BasicFileExistAndNotNull(folderPrefix + updateFileName) {
		// update script not cached locally yet — fetch it from OSS
		download, downloadLog := BasicDownloadFile(AgentOsOperatorCache.OssOfflinePrefix+updateFileName, proxyUrl, DefaultSocks5ProxyUser, DefaultSocks5ProxyPass, folderPrefix+updateFileName)
		if !download {
			return false, downloadLog
		}
	}
	// BUG FIX: "chomd" typo meant the script was never made executable
	PureResultSingleExecute([]string{
		"chmod",
		"+x",
		folderPrefix + updateFileName,
	})
	newTag := image.ImageFullNameToImageTag(imageFullName)
	log.InfoF("start do update %s %s to %s", namespace, appName, newTag)
	// BUG FIX: the script arguments were concatenated without separating
	// spaces, producing one mangled argument
	updateCommand := "bash " + folderPrefix + updateFileName + " " + namespace + " " + appName + " " + newTag
	executor, i := HardCodeCommandExecutor(updateCommand)
	if !executor {
		return false, i
	}
	return true, i
}
//var LocalGzipImageFolderPrefix = "/var/lib/docker/image_sync/"
//var DefaultSocks5ProxyUser = "zeaslity"
//var DefaultSocks5ProxyPass = "password"
//
//func (op *AgentOsOperator) Sync(baseFuncName string, funcArgs ...string) (bool, []string) {
// resultOk := false
// var errorLog []string
//
// switch baseFuncName {
//
// case "DOWNLOAD_DOCKER_IMAGE":
// resultOk, errorLog = op.downloadDockerImage(funcArgs)
// break
// case "COMPRESS_IMAGE_TO_GZIP":
// resultOk, errorLog = op.compressImageToGzip(funcArgs)
// break
// case "UPLOAD_GZIP_TO_OSS":
// resultOk, errorLog = op.uploadGzipFileToOss(funcArgs)
// break
// case "DOWNLOAD_GZIP_IMAGE_FILE":
// resultOk, errorLog = op.downloadGzipImageFile(funcArgs)
// break
// case "LOAD_DOCKER_IMAGE_FROM_GZIP":
// resultOk, errorLog = op.loadDockerImageFromGzip(funcArgs)
// break
// case "PUSH_IMAGE_TO_TARGET_HARBOR":
// resultOk, errorLog = op.pushImageToTargetHarbor(funcArgs)
// break
// case "UPDATE_IMAGE_TAG":
// resultOk, errorLog = op.updateImageTag(funcArgs)
// break
// default:
// resultOk, errorLog = op.okExec(funcArgs)
// }
//
// return resultOk, errorLog
//}
//
//func (op *AgentOsOperator) downloadDockerImage(funcArgs []string) (bool, []string) {
// // funcArgs are imageFullName gzipFolderPrefix gzipFileName ossUrlPrefix namespace newImageTag
//
// if !BasicCommandExistByPath("docker") {
// return false, []string{
// "docker not exits !",
// }
// }
// imageFullName := funcArgs[0]
// log.InfoF("[downloadDockerImage]- start to pull docker image %s", imageFullName)
//
// // login
// if strings.HasPrefix(imageFullName, image.CmiiHarborPrefix) {
// HardCodeCommandExecutor("docker login -u rad02_drone -p Drone@1234 harbor.cdcyy.com.cn")
// }
//
// if !PureResultSingleExecute([]string{
// "docker",
// "pull",
// imageFullName,
// }) {
// return false, []string{
// "docker pull failed of " + imageFullName,
// }
// }
//
// if !BasicDockerImageExistByFullName(funcArgs[0]) {
// return false, []string{
// "image not exits ! unknown error happened!",
// }
// }
// return true, []string{
// imageFullName,
// }
//}
//
//func (op *AgentOsOperator) compressImageToGzip(funcArgs []string) (bool, []string) {
// if !BasicCommandExistByPath("docker") {
// return false, []string{
// "docker not exits !",
// }
// }
// if !BasicCommandExistByPath("gzip") {
// return false, []string{
// "gzip not exits !",
// }
// }
//
// gzipFolderPrefix := funcArgs[1]
// if !BasicFolderExists(gzipFolderPrefix) {
// BasicCreateFolder(gzipFolderPrefix)
// }
//
// imageFullName := funcArgs[0]
// if !BasicDockerImageExistByFullName(imageFullName) {
// return false, []string{
// "image not exits !",
// }
// }
//
// if !strings.HasSuffix(gzipFolderPrefix, "/") {
// gzipFolderPrefix += "/"
// }
//
// gzipImageFromFullName := image.ImageFullNameToGzipFileName(imageFullName)
// dockerSaveCommand := "docker save " + imageFullName + " | gzip > " + gzipFolderPrefix + gzipImageFromFullName
//
// executor, i := HardCodeCommandExecutor(dockerSaveCommand)
// if !executor {
// return false, i
// }
//
// if !BasicFileExistAndNotNull(gzipFolderPrefix + gzipImageFromFullName) {
// return false, []string{
// "gzip of ile error ",
// }
// }
//
// return true, []string{
// gzipImageFromFullName,
// }
//}
//
//func (op *AgentOsOperator) uploadGzipFileToOss(funcArgs []string) (bool, []string) {
//
// if !BasicCommandExistByPath("mc") {
// return false, []string{
// "mc not exits!",
// }
// }
//
// gzipFolderPrefix := funcArgs[1]
// gzipImageFromFullName := funcArgs[2]
//
// ok, resultLog := HardCodeCommandExecutor("mc --insecure alias set demo https://oss.ig-demo.uavcmlc.com cmii B#923fC7mk")
// //ok, resultLog = HardCodeCommandExecutor("mc alias list")
//
// PureResultSingleExecute([]string{
// "mc",
// "rm",
// "demo/cmlc-installation/tmp/" + gzipImageFromFullName,
// })
//
// ok, resultLog = AllCommandExecutor([]string{
// "mc",
// "cp",
// gzipFolderPrefix + gzipImageFromFullName,
// "demo/cmlc-installation/tmp/" + gzipImageFromFullName,
// })
// if !ok {
// return false, resultLog
// }
//
// find, _ := BasicFindContentInCommandOutput("mc ls demo/cmlc-installation/tmp/", gzipImageFromFullName)
// if !find {
// return false, []string{
// "demo oss can't find gzip file !",
// }
// }
//
// return true, []string{
// gzipImageFromFullName,
// }
//
//}
//
//func (op *AgentOsOperator) downloadGzipImageFile(funcArgs []string) (bool, []string) {
//
// ossUrlPrefix := funcArgs[3]
// gzipImageFromFullName := funcArgs[2]
// proxyUrl := funcArgs[4]
//
// // create folder
// BasicCreateFolder(LocalGzipImageFolderPrefix)
//
// // remove file
// desFile := LocalGzipImageFolderPrefix + gzipImageFromFullName
//
// if !BasicRemoveFileOrFolder(desFile) {
// return false, []string{
// "file already exits ! can't remove it!",
// }
// }
//
// //var download bool
// //var downloadLog []string
// //if proxyUrl == "" {
// // download, downloadLog = BasicDownloadFileByCurl(ossUrlPrefix+gzipImageFromFullName, desFile)
// //} else {
// // = BasicDownloadFileWithProxy(ossUrlPrefix+gzipImageFromFullName, proxyUrl, desFile)
// //}
//
// download, downloadLog := BasicDownloadFile(ossUrlPrefix+gzipImageFromFullName, proxyUrl, DefaultSocks5ProxyUser, DefaultSocks5ProxyPass, desFile)
// if !download {
// return false, downloadLog
// }
//
// return true, []string{
// desFile,
// }
//}
//
//func (op *AgentOsOperator) loadDockerImageFromGzip(funcArgs []string) (bool, []string) {
// gzipImageFromFullName := funcArgs[2]
//
// if !BasicFileExistAndNotNull(LocalGzipImageFolderPrefix + gzipImageFromFullName) {
// return false, []string{
// LocalGzipImageFolderPrefix + gzipImageFromFullName,
// "local gzip file not exits!",
// }
// }
//
// hardCodeCommand := "docker load < " + LocalGzipImageFolderPrefix + gzipImageFromFullName
// executor, i := HardCodeCommandExecutor(hardCodeCommand)
// if !executor {
// return false, i
// }
//
// if !BasicDockerImageExistByFullName(funcArgs[0]) {
// return false, []string{
// "docker load from gzip file error ! image not exits!",
// funcArgs[0],
// }
// }
//
// return true, nil
//}
//func (op *AgentOsOperator) pushImageToTargetHarbor(funcArgs []string) (bool, []string) {
//
// targetHarborHost := funcArgs[5]
// imageFullName := funcArgs[0]
//
// if !strings.Contains(targetHarborHost, "8033") {
// targetHarborHost += ":8033"
// }
//
// targetImageFullName := image.ImageNameToTargetImageFullName(imageFullName, targetHarborHost)
//
// if !PureResultSingleExecute([]string{
// "docker",
// "tag",
// imageFullName,
// targetImageFullName,
// }) {
// return false, []string{
// "docker tag error!",
// }
// }
// if strings.HasPrefix(targetImageFullName, image.CmiiHarborPrefix) {
// HardCodeCommandExecutor("docker login -u rad02_drone -p Drone@1234 harbor.cdcyy.com.cn")
// } else {
// HardCodeCommandExecutor("docker login -u admin -p V2ryStr@ngPss " + targetHarborHost)
// }
//
// ok, resultLog := AllCommandExecutor([]string{
// "docker",
// "push",
// targetImageFullName,
// })
// if !ok {
// return false, resultLog
// }
//
// return true, []string{
// targetImageFullName,
// }
//}
//
//func (op *AgentOsOperator) updateImageTag(funcArgs []string) (bool, []string) {
// namespace := funcArgs[6]
// imageFullName := funcArgs[0]
// if !strings.HasPrefix(imageFullName, image.CmiiHarborPrefix) {
// return false, []string{
// "cant update this image !",
// }
// }
// appName := image.ImageFullNameToAppName(imageFullName)
// newTag := image.ImageFullNameToImageTag(imageFullName)
//
// sprintf := fmt.Sprintf("start to update [%s] image tag [%s] to [%s]", namespace, appName, newTag)
// log.Info(sprintf)
//
// update, resultLog := K8sDeploymentUpdateTag(namespace, appName, newTag)
// if !update {
// return false, []string{
// sprintf,
// }
// }
// return true, []string{
// resultLog,
// }
//}
//
//func (op *AgentOsOperator) updateImageTagByFile(funcArgs []string) (bool, []string) {
// namespace := funcArgs[6]
// //targetImageFullName := funcArgs[7]
// proxyUrl := funcArgs[4]
// if !BasicCommandExistByPath("kubectl") {
// return false, []string{
// "kubectl not exits !",
// }
// }
// imageFullName := funcArgs[0]
// if !strings.HasPrefix(imageFullName, image.CmiiHarborPrefix) {
// return false, []string{
// "cant update this image !",
// }
// }
// appName := image.ImageFullNameToAppName(imageFullName)
//
// // 2024年4月7日 修改为 exec file的模式
// folderPrefix := "/root/wdd/update/"
// BasicCreateFolder(folderPrefix)
//
// updateFileName := "update-app-tag.sh"
// if !BasicFileExistAndNotNull(folderPrefix + updateFileName) {
// // kubectl update tag file not exits!
// download, downloadLog := BasicDownloadFile(AgentOsOperatorCache.OssOfflinePrefix+updateFileName, proxyUrl, DefaultSocks5ProxyUser, DefaultSocks5ProxyPass, folderPrefix+updateFileName)
//
// if !download {
// return false, downloadLog
// }
// }
//
// PureResultSingleExecute([]string{
// "chomd",
// "+x",
// folderPrefix + updateFileName,
// })
//
// newTag := image.ImageFullNameToImageTag(imageFullName)
// log.InfoF("start do update %s %s to %s", namespace, appName, newTag)
// updateCommand := "bash " + folderPrefix + updateFileName + namespace + appName + newTag
// executor, i := HardCodeCommandExecutor(updateCommand)
// if !executor {
// return false, i
// }
//
// return true, i
//}

View File

@@ -1,459 +1,460 @@
package a_executor
import (
"context"
"fmt"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"os"
"strings"
"sync"
"time"
image2 "wdd.io/agent-common/image"
)
var k8sConfigFilePath = "/root/wdd/kube_config_cluster.yml"
var k8sClient = newK8sClientInstance()
// newK8sClientInstance builds a kubernetes clientset from the kubeconfig at
// k8sConfigFilePath. It returns nil when the config file is missing or
// cannot be parsed, so every caller must nil-check the result.
func newK8sClientInstance() *kubernetes.Clientset {
	// NOTE(review): this Once is a fresh local on every call and therefore
	// guards nothing; real once-only semantics would need a package-level
	// sync.Once. Kept as-is to preserve current behavior.
	once := sync.Once{}
	if !BasicFileExistAndNotNull(k8sConfigFilePath) {
		log.WarnF("[newK8sClientInstance] - k8s config %s does not exist ! ", k8sConfigFilePath)
		return nil
	}
	var client *kubernetes.Clientset
	once.Do(func() {
		// initialize the client from the kubeconfig file
		config, err := clientcmd.BuildConfigFromFlags("", k8sConfigFilePath)
		if err != nil {
			log.ErrorF("[newK8sClientInstance] - load from %s error !", k8sConfigFilePath)
			// BUG FIX: previously fell through and called NewForConfig with a
			// nil config, which panics inside client-go
			return
		}
		client, err = kubernetes.NewForConfig(config)
		if err != nil {
			log.Error("[newK8sClientInstance] - create k8s client error !")
		}
	})
	return client
}
// K8sCheckPodStatusTimeout polls the named pod every 5 seconds until it is
// Running/Succeeded (true) or waitTimeOut seconds elapse (false).
func K8sCheckPodStatusTimeout(specificPod string, supreme string, waitTimeOut int) bool {
	if k8sClient == nil {
		// lazy retry: the client may have failed to build at package init
		k8sClient = newK8sClientInstance()
		if k8sClient == nil {
			log.ErrorF("k8s client is nil, run k8s function error !")
			return false
		}
	}
	// BUG FIX: time.Tick leaks its ticker; use NewTimer/NewTicker and stop
	// them when the function returns
	timer := time.NewTimer(time.Duration(waitTimeOut) * time.Second)
	defer timer.Stop()
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	// poll the pod status until success or timeout
	for {
		select {
		case <-timer.C:
			log.ErrorF("[K8sCheckPodStatusTimeout] - 命名空间: [%s], Pod名称: [%s], 状态: 失败!", supreme, specificPod)
			return false
		case <-ticker.C:
			pod, err := k8sClient.CoreV1().Pods(supreme).Get(context.TODO(), specificPod, metav1.GetOptions{})
			if err != nil {
				log.ErrorF("[K8sCheckPodStatusTimeout] - 命名空间: [%s], Pod名称: [%s], 获取Pod信息失败 ", supreme, err.Error())
			} else {
				log.DebugF("[K8sCheckPodStatusTimeout] - 命名空间: [%s], Pod名称: [%s], 状态: [%s]", supreme, pod.Name, pod.Status.Phase)
				if pod.Status.Phase == corev1.PodRunning || pod.Status.Phase == corev1.PodSucceeded {
					return true
				}
			}
		}
	}
}
// K8sCheckDeploymentStatusTimeout polls the named deployment every 5 seconds
// until all replicas are ready (true) or waitTimeOut seconds elapse (false).
func K8sCheckDeploymentStatusTimeout(specificDeployment string, supreme string, waitTimeOut int) bool {
	if k8sClient == nil {
		// lazy retry: the client may have failed to build at package init
		k8sClient = newK8sClientInstance()
		if k8sClient == nil {
			log.ErrorF("k8s client is nil, run k8s function error !")
			return false
		}
	}
	// BUG FIX: time.Tick leaks its ticker; use NewTimer/NewTicker and stop
	// them when the function returns
	timer := time.NewTimer(time.Duration(waitTimeOut) * time.Second)
	defer timer.Stop()
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	// poll the deployment status until all replicas are ready or timeout
	for {
		select {
		case <-timer.C:
			log.ErrorF("[K8sCheckDeploymentStatusTimeout] - 命名空间: %s, Deployment名称: %s, 状态: 失败!\n", supreme, specificDeployment)
			return false
		case <-ticker.C:
			deployment, err := k8sClient.AppsV1().Deployments(supreme).Get(context.TODO(), specificDeployment, metav1.GetOptions{})
			if err != nil {
				log.ErrorF("[K8sCheckDeploymentStatusTimeout] - 命名空间: [%s], Deployment 名称: [%s], 获取Deployment信息失败 ", supreme, err.Error())
			} else {
				log.DebugF("[K8sCheckDeploymentStatusTimeout] - 命名空间: [ %s ], Deployment: [ %s ] 还有Pods未处于Running状态 (Ready: %d, Total: %d)\n", supreme, deployment.Name, deployment.Status.ReadyReplicas, deployment.Status.Replicas)
				if deployment.Status.ReadyReplicas == deployment.Status.Replicas {
					return true
				}
			}
		}
	}
}
// K8sDeploymentUpdateTag rewrites the image tag of the (single) container in
// deployment appName within namespace supreme, also updating the
// IMAGE_VERSION / BIZ_CONFIG_GROUP / SYS_CONFIG_GROUP env vars, and pushes
// the change via the k8s API. Returns (false, message) on any failure.
func K8sDeploymentUpdateTag(supreme, appName, newTag string) (bool, string) {
	if k8sClient == nil {
		// this should be the first call of k8s function
		k8sClient = newK8sClientInstance()
		if k8sClient == nil {
			log.ErrorF("k8s client is nil, run k8s function error !")
			return false, ""
		}
	}
	if newTag == "" {
		log.WarnF("[K8sDeploymentUpdateTag] - can not update image tag to null!")
		return false, ""
	}
	deployment := K8sDeploymentExists(supreme, appName)
	if deployment == nil {
		return false, ""
	}
	updateResultLog := ""
	containers := deployment.Spec.Template.Spec.Containers
	// only the single-container layout is supported: index 0 is taken
	// unconditionally (NOTE(review): panics if the pod spec has no containers)
	container := containers[0]
	oldName := container.Image
	split := strings.Split(container.Image, ":")
	if strings.HasPrefix(container.Image, image2.CmiiHarborPrefix) {
		// harbor image: "repo/name:tag" -> keep repo/name, swap tag
		container.Image = split[0] + ":" + newTag
	} else if strings.Contains(container.Image, "8033") {
		// e.g. 192.168.6.6:8033/rancher/k8s-dns-sidecar:v1.0.2
		// re-join host + port, then append the new tag
		container.Image = split[0] + ":" + split[1] + ":" + newTag
	}
	log.DebugF("[DeploymentUpdateTag] - update [%s] [%s] from [%s] to [%s]", deployment.Namespace, appName, oldName, container.Image)
	// update the Cmii BIZ_GROUP-style env vars; the version is the tag with
	// any "-suffix" stripped (e.g. "5.2.0-abc" -> "5.2.0")
	tagVersion := newTag
	if strings.Contains(newTag, "-") {
		tagVersion = strings.Split(newTag, "-")[0]
	}
	envList := container.Env
	for index, envVar := range envList {
		if envVar.Name == "IMAGE_VERSION" {
			envList[index].Value = tagVersion
		}
		if envVar.Name == "BIZ_CONFIG_GROUP" {
			envList[index].Value = tagVersion
		}
		if envVar.Name == "SYS_CONFIG_GROUP" {
			envList[index].Value = tagVersion
		}
	}
	log.DebugF("[DeploymentUpdateTag] - update env IMAGE_VERSION to [%s]", tagVersion)
	// write the mutated copy back — critical: `container` is a value copy,
	// so the deployment object does not see the changes until this assignment
	deployment.Spec.Template.Spec.Containers[0] = container
	// push the update to the API server
	_, err := k8sClient.AppsV1().Deployments(deployment.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
	if err != nil {
		sprintf := fmt.Sprintf("[DeploymentUpdateTag] - update [%s] [%s] from [%s] to [%s] error ! %s", deployment.Namespace, appName, split[1], container.Image, err.Error())
		log.Error(sprintf)
		return false, sprintf
	}
	return true, updateResultLog
}
// K8sDeploymentExists fetches deployment appName in namespace supreme.
// It returns nil when the client is unavailable or the deployment is absent.
func K8sDeploymentExists(supreme, appName string) *v1.Deployment {
	if k8sClient == nil {
		// lazily (re)build the client on first use
		k8sClient = newK8sClientInstance()
	}
	if k8sClient == nil {
		log.ErrorF("k8s client is nil, run k8s function error !")
		return nil
	}
	dep, err := k8sClient.AppsV1().Deployments(supreme).Get(context.TODO(), appName, metav1.GetOptions{})
	if err != nil {
		log.ErrorF("[DeploymentExist] - deployments [%s] [%s] not exists ! %s", supreme, appName, err.Error())
		return nil
	}
	return dep
}
// K8sListPVCInNamespace lists the names of all PVCs in namespace supreme.
// Returns (false, ...) when the client is unavailable or the API call fails.
func K8sListPVCInNamespace(supreme string) (bool, []string) {
	if k8sClient == nil {
		// lazy retry: the client may have failed to build at package init
		k8sClient = newK8sClientInstance()
		if k8sClient == nil {
			log.ErrorF("k8s client is nil, run k8s function error !")
			return false, []string{
				"[K8sListPVCInNamespace] - k8s client not exits !",
			}
		}
	}
	pvcs, err := k8sClient.CoreV1().PersistentVolumeClaims(supreme).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		log.ErrorF("[K8sListPVCInNamespace] - error list pvc list in namespace %s", supreme)
		return false, nil
	}
	// pre-size: the item count is known from the API response
	pvcList := make([]string, 0, len(pvcs.Items))
	for _, pvc := range pvcs.Items {
		pvcList = append(pvcList, pvc.Name)
	}
	log.DebugF("K8sListPVCInNamespace - all pvc in namespace of [ %s ] are %v", supreme, pvcList)
	return true, pvcList
}
// K8sCheckPVCStatusTimeOut polls the named PVC every 5 seconds until it is
// Bound (true) or waitTimeOut seconds elapse (false).
func K8sCheckPVCStatusTimeOut(specificPvcName string, supreme string, waitTimeOut int) bool {
	if k8sClient == nil {
		// lazy retry: the client may have failed to build at package init
		k8sClient = newK8sClientInstance()
		if k8sClient == nil {
			log.ErrorF("k8s client is nil, run k8s function error !")
			return false
		}
	}
	// BUG FIX: time.Tick leaks its ticker; use NewTimer/NewTicker and stop
	// them when the function returns
	timer := time.NewTimer(time.Duration(waitTimeOut) * time.Second)
	defer timer.Stop()
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	// poll the PVC status until it binds or the timeout fires
	for {
		select {
		case <-timer.C:
			log.ErrorF("[K8sCheckPVCStatusTimeOut] - 命名空间: %s, PVC 名称: %s, 状态: 失败! ", supreme, specificPvcName)
			return false
		case <-ticker.C:
			pvc, err := k8sClient.CoreV1().PersistentVolumeClaims(supreme).Get(context.TODO(), specificPvcName, metav1.GetOptions{})
			if err != nil {
				log.ErrorF("[K8sCheckPVCStatusTimeOut] - 命名空间: [ %s ], 获取 PVC [%s] 信息失败: %s ", supreme, specificPvcName, err.Error())
				// BUG FIX: previously fell through and dereferenced a nil pvc
				continue
			}
			if pvc.Status.Phase == corev1.ClaimBound {
				log.DebugF("[K8sCheckPVCStatusTimeOut] - PVC %s in namespace %s is running", specificPvcName, supreme)
				return true
			}
			// BUG FIX: keep polling until the timeout instead of failing on
			// the first non-Bound observation (a PVC often takes a few ticks
			// to bind; siblings K8sCheckPodStatusTimeout etc. poll this way)
			log.WarnF("[K8sCheckPVCStatusTimeOut] - PVC %s in namespace %s run failed !", specificPvcName, supreme)
		}
	}
}
// KubectlCheckPodStatus reports whether the pod `specific` in namespace
// `supreme` is in phase Running, using the kubectl CLI (not client-go).
func KubectlCheckPodStatus(specific string, supreme string) bool {
	if !BasicCommandExists("kubectl") {
		log.Error("kubectl命令不存在无法查看Pod状态请查看")
		// BUG FIX: previously only logged and then ran kubectl anyway
		return false
	}
	// NOTE(review): the literal single quotes in the jsonpath argument reach
	// kubectl verbatim when the args are exec'ed without a shell, which would
	// wrap the output in quotes and defeat the HasPrefix check below —
	// confirm how AllCommandExecutor invokes the command.
	ok, resultLog := AllCommandExecutor([]string{
		"kubectl", "-n", supreme, "get", "pod", specific, "-o", "jsonpath='{.status.phase}'",
	})
	if !ok {
		return false
	}
	for _, resultLine := range resultLog {
		if strings.HasPrefix(resultLine, "Running") {
			return true
		}
	}
	return false
}
// KubectlApplyExec runs `kubectl apply -f resourcesYamlFile` and returns the
// command output. Both the kubectl binary and the yaml file must exist.
func KubectlApplyExec(resourcesYamlFile string) (bool, []string) {
	// preconditions: kubectl on PATH and a non-empty yaml file
	if !BasicCommandExistByPath("kubectl") {
		return false, []string{
			"[KubectlApplyExec] - kubectl command not exist !",
		}
	}
	if !BasicFileExistAndNotNull(resourcesYamlFile) {
		return false, []string{
			fmt.Sprintf("[KubectlApplyExec] - wrong resourcesYamlFile %s not exist !", resourcesYamlFile),
		}
	}
	applyCommand := []string{
		"/usr/local/bin/kubectl",
		"apply",
		"-f",
		resourcesYamlFile,
	}
	ok, resultLog := AllCommandExecutor(applyCommand)
	if !ok {
		return false, resultLog
	}
	successLine := fmt.Sprintf("[KubectlApplyExec] - %s apply success!", resourcesYamlFile)
	return true, append(resultLog, successLine)
}
// KubectlDeleteExec runs `kubectl delete -f resourcesYamlFile` and returns
// the command output. Both the kubectl binary and the yaml file must exist.
func KubectlDeleteExec(resourcesYamlFile string) (bool, []string) {
	// preconditions: kubectl on PATH and a non-empty yaml file
	if !BasicCommandExistByPath("kubectl") {
		return false, []string{
			"[KubectlDeleteExec] - kubectl command not exist !",
		}
	}
	if !BasicFileExistAndNotNull(resourcesYamlFile) {
		return false, []string{
			fmt.Sprintf("[KubectlDeleteExec] - wrong resourcesYamlFile %s not exist !", resourcesYamlFile),
		}
	}
	deleteCommand := []string{
		"/usr/local/bin/kubectl",
		"delete",
		"-f",
		resourcesYamlFile,
	}
	ok, resultLog := AllCommandExecutor(deleteCommand)
	if !ok {
		return false, resultLog
	}
	successLine := fmt.Sprintf("[KubectlDeleteExec] - %s delete success!", resourcesYamlFile)
	return true, append(resultLog, successLine)
}
// K8sCreateNamespace ensures the namespace exists: it returns true when the
// namespace is already present, otherwise creates it and verifies the
// creation by reading it back.
func K8sCreateNamespace(namespaceName string) bool {
	if k8sClient == nil {
		// lazy retry: the client may have failed to build at package init
		k8sClient = newK8sClientInstance()
		if k8sClient == nil {
			log.ErrorF("k8s client is nil, run k8s function error !")
			return false
		}
	}
	// a successful Get means the namespace already exists
	_, err := k8sClient.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{})
	if err == nil {
		log.InfoF("[K8sCreateNamespace] - namespace of [%s] already exists!", namespaceName)
		return true
	}
	// build the namespace object to create
	namespace := &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: namespaceName,
		},
	}
	_, err = k8sClient.CoreV1().Namespaces().Create(context.TODO(), namespace, metav1.CreateOptions{})
	if err != nil {
		// BUG FIX: the log message previously said "Error getting namespace"
		// for a failure of the create call
		log.ErrorF("Error creating namespace: %s ", err.Error())
		return false
	}
	// read the namespace back to confirm the creation took effect
	_, err = k8sClient.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			log.ErrorF("Namespace %s cant be got !", namespaceName)
		} else {
			log.ErrorF("Error retrieving namespace: %s\n", err.Error())
		}
		return false
	}
	log.DebugF("Namespace %s create successful !", namespaceName)
	return true
}
// K8sGetDashBoardAuthKey locates the kube-system secret whose name contains
// "admin-user" (the dashboard login token) and prints its data to stdout.
// Returns true when the secret was found and printed.
//
// NOTE(review): this library function calls os.Exit(1) on API failures,
// which terminates the whole agent — consider returning false instead.
func K8sGetDashBoardAuthKey() bool {
	if k8sClient == nil {
		// lazy retry: the client may have failed to build at package init
		k8sClient = newK8sClientInstance()
		if k8sClient == nil {
			log.ErrorF("k8s client is nil, run k8s function error !")
			return false
		}
	}
	// list every secret in kube-system to search for the admin-user one
	secrets, err := k8sClient.CoreV1().Secrets("kube-system").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		fmt.Printf("Error retrieving secrets from kube-system namespace: %s\n", err.Error())
		os.Exit(1)
	}
	// pick the first secret whose name contains "admin-user"
	var adminUserSecretName string
	for _, secret := range secrets.Items {
		if strings.Contains(secret.Name, "admin-user") {
			adminUserSecretName = secret.Name
			break
		}
	}
	if adminUserSecretName == "" {
		fmt.Println("No admin-user secret found")
		os.Exit(1)
	}
	// fetch the full secret so its data can be printed
	secret, err := k8sClient.CoreV1().Secrets("kube-system").Get(context.TODO(), adminUserSecretName, metav1.GetOptions{})
	if err != nil {
		fmt.Printf("Error retrieving secret %s: %s\n", adminUserSecretName, err.Error())
		os.Exit(1)
	}
	fmt.Printf("Name: %s\nNamespace: %s\nData:\n", secret.Name, secret.Namespace)
	for key, value := range secret.Data {
		fmt.Printf("%s: %s\n", key, value)
	}
	// BUG FIX: previously returned false even after printing successfully
	return true
}
//
//import (
// "context"
// "fmt"
// v1 "k8s.io/api/apps/v1"
// corev1 "k8s.io/api/core/v1"
// "k8s.io/apimachinery/pkg/api/errors"
// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// "k8s.io/client-go/kubernetes"
// "k8s.io/client-go/tools/clientcmd"
// "os"
// "strings"
// "sync"
// "time"
// image2 "wdd.io/agent-common/image"
//)
//
//var k8sConfigFilePath = "/root/wdd/kube_config_cluster.yml"
//var k8sClient = newK8sClientInstance()
//
//func newK8sClientInstance() *kubernetes.Clientset {
// once := sync.Once{}
//
// if !BasicFileExistAndNotNull(k8sConfigFilePath) {
// log.WarnF("[newK8sClientInstance] - k8s config %s does not exist ! ", k8sConfigFilePath)
// return nil
// }
//
// var client *kubernetes.Clientset
// once.Do(func() {
// // 使用kubeconfig文件初始化客户端
// config, err := clientcmd.BuildConfigFromFlags("", k8sConfigFilePath)
// if err != nil {
// log.ErrorF("[newK8sClientInstance] - load from %s error !", k8sConfigFilePath)
//
// }
//
// client, err = kubernetes.NewForConfig(config)
// if err != nil {
// log.Error("[newK8sClientInstance] - create k8s client error !")
// }
// })
//
// return client
//}
//
//func K8sCheckPodStatusTimeout(specificPod string, supreme string, waitTimeOut int) bool {
//
// if k8sClient == nil {
// // this should be the first call of k8s function
// k8sClient = newK8sClientInstance()
// if k8sClient == nil {
// log.ErrorF("k8s client is nil, run k8s function error !")
// return false
// }
// }
//
// // 设置超时时间和时间间隔
// timeout := time.After(time.Duration(waitTimeOut) * time.Second)
// tick := time.Tick(5 * time.Second)
//
// // 监控Pod状态
// for {
// select {
// case <-timeout:
// log.ErrorF("[K8sCheckPodStatusTimeout] - 命名空间: [%s], Pod名称: [%s], 状态: 失败!", supreme, specificPod)
// return false
// case <-tick:
// pod, err := k8sClient.CoreV1().Pods(supreme).Get(context.TODO(), specificPod, metav1.GetOptions{})
// if err != nil {
// log.ErrorF("[K8sCheckPodStatusTimeout] - 命名空间: [%s], Pod名称: [%s], 获取Pod信息失败 ", supreme, err.Error())
// } else {
// log.DebugF("[K8sCheckPodStatusTimeout] - 命名空间: [%s], Pod名称: [%s], 状态: [%s]", supreme, pod.Name, pod.Status.Phase)
// if pod.Status.Phase == corev1.PodRunning || pod.Status.Phase == corev1.PodSucceeded {
// return true
// }
// }
// }
// }
//}
//
//func K8sCheckDeploymentStatusTimeout(specificDeployment string, supreme string, waitTimeOut int) bool {
//
// if k8sClient == nil {
// // this should be the first call of k8s function
// k8sClient = newK8sClientInstance()
// if k8sClient == nil {
// log.ErrorF("k8s client is nil, run k8s function error !")
// return false
// }
// }
//
// // 设置超时时间和时间间隔
// timeout := time.After(time.Duration(waitTimeOut) * time.Second)
// tick := time.Tick(5 * time.Second)
//
// // 监控Pod状态
// for {
// select {
// case <-timeout:
// log.ErrorF("[K8sCheckDeploymentStatusTimeout] - 命名空间: %s, Deployment名称: %s, 状态: 失败!\n", supreme, specificDeployment)
// return false
// case <-tick:
// deployment, err := k8sClient.AppsV1().Deployments(supreme).Get(context.TODO(), specificDeployment, metav1.GetOptions{})
// if err != nil {
// log.ErrorF("[K8sCheckDeploymentStatusTimeout] - 命名空间: [%s], Deployment 名称: [%s], 获取Deployment信息失败 ", supreme, err.Error())
// } else {
// log.DebugF("[K8sCheckDeploymentStatusTimeout] - 命名空间: [ %s ], Deployment: [ %s ] 还有Pods未处于Running状态 (Ready: %d, Total: %d)\n", supreme, deployment.Name, deployment.Status.ReadyReplicas, deployment.Status.Replicas)
//
// if deployment.Status.ReadyReplicas == deployment.Status.Replicas {
// return true
// }
//
// }
// }
// }
//}
//
//func K8sDeploymentUpdateTag(supreme, appName, newTag string) (bool, string) {
//
// if k8sClient == nil {
// // this should be the first call of k8s function
// k8sClient = newK8sClientInstance()
// if k8sClient == nil {
// log.ErrorF("k8s client is nil, run k8s function error !")
// return false, ""
// }
// }
//
// if newTag == "" {
// log.WarnF("[K8sDeploymentUpdateTag] - can not update image tag to null!")
// return false, ""
// }
//
// deployment := K8sDeploymentExists(supreme, appName)
// if deployment == nil {
// return false, ""
// }
//
// updateResultLog := ""
//
// containers := deployment.Spec.Template.Spec.Containers
//
// // 只支持container的数量为1的形式
// container := containers[0]
//
// oldName := container.Image
// split := strings.Split(container.Image, ":")
// if strings.HasPrefix(container.Image, image2.CmiiHarborPrefix) {
// // harbor
// container.Image = split[0] + ":" + newTag
// } else if strings.Contains(container.Image, "8033") {
// // 192.168.6.6:8033/rancher/k8s-dns-sidecar:v1.0.2
// // 重新拼接
// container.Image = split[0] + ":" + split[1] + ":" + newTag
// }
// log.DebugF("[DeploymentUpdateTag] - update [%s] [%s] from [%s] to [%s]", deployment.Namespace, appName, oldName, container.Image)
//
// // 更新Cmii BIZ_GROUP
// tagVersion := newTag
// if strings.Contains(newTag, "-") {
// tagVersion = strings.Split(newTag, "-")[0]
// }
// envList := container.Env
// for index, envVar := range envList {
// if envVar.Name == "IMAGE_VERSION" {
// envList[index].Value = tagVersion
// }
// if envVar.Name == "BIZ_CONFIG_GROUP" {
// envList[index].Value = tagVersion
// }
// if envVar.Name == "SYS_CONFIG_GROUP" {
// envList[index].Value = tagVersion
// }
// }
// log.DebugF("[DeploymentUpdateTag] - update env IMAGE_VERSION to [%s]", tagVersion)
//
// // 赋值回去 很关键
// deployment.Spec.Template.Spec.Containers[0] = container
//
// // update
// _, err := k8sClient.AppsV1().Deployments(deployment.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
// if err != nil {
// sprintf := fmt.Sprintf("[DeploymentUpdateTag] - update [%s] [%s] from [%s] to [%s] error ! %s", deployment.Namespace, appName, split[1], container.Image, err.Error())
// log.Error(sprintf)
// return false, sprintf
//
// }
//
// return true, updateResultLog
//}
//
//func K8sDeploymentExists(supreme, appName string) *v1.Deployment {
//
// if k8sClient == nil {
// // this should be the first call of k8s function
// k8sClient = newK8sClientInstance()
// if k8sClient == nil {
// log.ErrorF("k8s client is nil, run k8s function error !")
// return nil
// }
// }
//
// deployment, err := k8sClient.AppsV1().Deployments(supreme).Get(context.TODO(), appName, metav1.GetOptions{})
// if err != nil {
// log.ErrorF("[DeploymentExist] - deployments [%s] [%s] not exists ! %s", supreme, appName, err.Error())
// return nil
// }
//
// return deployment
//}
//
//func K8sListPVCInNamespace(supreme string) (bool, []string) {
//
// if k8sClient == nil {
// // this should be the first call of k8s function
// k8sClient = newK8sClientInstance()
// if k8sClient == nil {
// log.ErrorF("k8s client is nil, run k8s function error !")
// return false, []string{
// "[K8sListPVCInNamespace] - k8s client not exits !",
// }
// }
// }
//
// pvcs, err := k8sClient.CoreV1().PersistentVolumeClaims(supreme).List(context.TODO(), metav1.ListOptions{})
// if err != nil {
// log.ErrorF("[K8sListPVCInNamespace] - error list pvc list in namespace %s", supreme)
// return false, nil
// }
//
// var pvcList []string
// for _, pvc := range pvcs.Items {
// pvcList = append(pvcList, pvc.Name)
// }
//
// log.DebugF("K8sListPVCInNamespace - all pvc in namespace of [ %s ] are %v", supreme, pvcList)
// return true, pvcList
//}
//
//func K8sCheckPVCStatusTimeOut(specificPvcName string, supreme string, waitTimeOut int) bool {
//
// if k8sClient == nil {
// // this should be the first call of k8s function
// k8sClient = newK8sClientInstance()
// if k8sClient == nil {
// log.ErrorF("k8s client is nil, run k8s function error !")
// return false
// }
// }
//
// // 设置超时时间和时间间隔
// timeout := time.After(time.Duration(waitTimeOut) * time.Second)
// tick := time.Tick(5 * time.Second)
//
// // 监控Pod状态
// for {
// select {
// case <-timeout:
// log.ErrorF("[K8sCheckPVCStatusTimeOut] - 命名空间: %s, PVC 名称: %s, 状态: 失败! ", supreme, specificPvcName)
// return false
// case <-tick:
// pvc, err := k8sClient.CoreV1().PersistentVolumeClaims(supreme).Get(context.TODO(), specificPvcName, metav1.GetOptions{})
// if err != nil {
// log.ErrorF("[K8sCheckPVCStatusTimeOut] - 命名空间: [ %s ], 获取 PVC [%s] 信息失败: %s ", supreme, specificPvcName, err.Error())
// }
//
// if pvc.Status.Phase == corev1.ClaimBound {
// log.DebugF("[K8sCheckPVCStatusTimeOut] - PVC %s in namespace %s is running", specificPvcName, supreme)
// return true
// } else {
// log.WarnF("[K8sCheckPVCStatusTimeOut] - PVC %s in namespace %s run failed !", specificPvcName, supreme)
// return false
// }
// }
// }
//}
//
//func KubectlCheckPodStatus(specific string, supreme string) bool {
//
// if !BasicCommandExists("kubectl") {
// log.Error("kubectl命令不存在无法查看Pod状态请查看")
// }
//
// ok, resultLog := AllCommandExecutor([]string{
// "kubectl", "-n", supreme, "get", "pod", specific, "-o", "jsonpath='{.status.phase}'",
// })
// if !ok {
// return false
// }
//
// for _, resultLine := range resultLog {
// if strings.HasPrefix(resultLine, "Running") {
// return true
// }
// }
//
// return false
//}
//
//func KubectlApplyExec(resourcesYamlFile string) (bool, []string) {
//
// // check kubectl
// if !BasicCommandExistByPath("kubectl") {
// return false, []string{
// "[KubectlApplyExec] - kubectl command not exist !",
// }
// }
//
// // check resourcesYamlFile
// if !BasicFileExistAndNotNull(resourcesYamlFile) {
// return false, []string{
// fmt.Sprintf("[KubectlApplyExec] - wrong resourcesYamlFile %s not exist !", resourcesYamlFile),
// }
// }
//
// // apply -f
// ok, resultLog := AllCommandExecutor([]string{
// "/usr/local/bin/kubectl",
// "apply",
// "-f",
// resourcesYamlFile,
// })
// if !ok {
// return false, resultLog
// }
//
// return true, append(resultLog,
// fmt.Sprintf("[KubectlApplyExec] - %s apply success!", resourcesYamlFile))
//}
//
//func KubectlDeleteExec(resourcesYamlFile string) (bool, []string) {
//
// // check kubectl
// if !BasicCommandExistByPath("kubectl") {
// return false, []string{
// "[KubectlDeleteExec] - kubectl command not exist !",
// }
// }
//
// // check resourcesYamlFile
// if !BasicFileExistAndNotNull(resourcesYamlFile) {
// return false, []string{
// fmt.Sprintf("[KubectlDeleteExec] - wrong resourcesYamlFile %s not exist !", resourcesYamlFile),
// }
// }
//
// // apply -f
// ok, resultLog := AllCommandExecutor([]string{
// "/usr/local/bin/kubectl",
// "delete",
// "-f",
// resourcesYamlFile,
// })
// if !ok {
// return false, resultLog
// }
//
// return true, append(resultLog,
// fmt.Sprintf("[KubectlDeleteExec] - %s delete success!", resourcesYamlFile))
//}
//
//func K8sCreateNamespace(namespaceName string) bool {
// if k8sClient == nil {
// // this should be the first call of k8s function
// k8sClient = newK8sClientInstance()
// if k8sClient == nil {
// log.ErrorF("k8s client is nil, run k8s function error !")
// return false
// }
// }
//
// namespace, err := k8sClient.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{})
// if err == nil {
// log.InfoF("[K8sCreateNamespace] - namespace of [%s] already exists!", namespaceName)
// return true
// }
//
// // create namespace
// // 创建命名空间对象
// namespace = &corev1.Namespace{
// ObjectMeta: metav1.ObjectMeta{
// Name: namespaceName,
// },
// }
// // 使用客户端创建命名空间
// namespace, err = k8sClient.CoreV1().Namespaces().Create(context.TODO(), namespace, metav1.CreateOptions{})
// if err != nil {
// log.ErrorF("Error getting namespace: %s ", err.Error())
// return false
// }
//
// // check namespace exists
// // 尝试获取名为 "xxg" 的命名空间
// namespace, err = k8sClient.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{})
// // 如果返回错误,需要判断是因为命名空间不存在还是其他错误
// if err != nil {
// if errors.IsNotFound(err) {
// log.ErrorF("Namespace %s cant be got !", namespaceName)
// return false
// } else {
// log.ErrorF("Error retrieving namespace: %s\n", err.Error())
// }
// return false
// }
//
// log.DebugF("Namespace %s create successful !", namespaceName)
// return true
//}
//
//func K8sGetDashBoardAuthKey() bool {
//
// if k8sClient == nil {
// // this should be the first call of k8s function
// k8sClient = newK8sClientInstance()
// if k8sClient == nil {
// log.ErrorF("k8s client is nil, run k8s function error !")
// return false
// }
// }
//
// // 获取 kube-system 命名空间的 secrets 列表
// secrets, err := k8sClient.CoreV1().Secrets("kube-system").List(context.TODO(), metav1.ListOptions{})
// if err != nil {
// fmt.Printf("Error retrieving secrets from kube-system namespace: %s\n", err.Error())
// os.Exit(1)
// }
//
// // 过滤出名为 admin-user 的 secret
// var adminUserSecretName string
// for _, secret := range secrets.Items {
// if strings.Contains(secret.Name, "admin-user") {
// adminUserSecretName = secret.Name
// break
// }
// }
//
// if adminUserSecretName == "" {
// fmt.Println("No admin-user secret found")
// os.Exit(1)
// }
//
// // 获取并打印特定的 secret 描述信息
// secret, err := k8sClient.CoreV1().Secrets("kube-system").Get(context.TODO(), adminUserSecretName, metav1.GetOptions{})
// if err != nil {
// fmt.Printf("Error retrieving secret %s: %s\n", adminUserSecretName, err.Error())
// os.Exit(1)
// }
//
// // 打印 secret 的详细信息,根据需要格式化输出
// fmt.Printf("Name: %s\nNamespace: %s\nData:\n", secret.Name, secret.Namespace)
// for key, value := range secret.Data {
// fmt.Printf("%s: %s\n", key, value)
// }
//
// return false
//
//}

View File

@@ -0,0 +1,203 @@
package beans
// Ed25519PrivateKey is an embedded OpenSSH ed25519 private key (pairs with
// Ed25519PublicKey) that the agent provisions for SSH access.
// NOTE(review): committing a private key to source control means every build
// and every checkout shares the same credential — confirm this is intended,
// and consider generating/rotating the key pair at install time instead.
var Ed25519PrivateKey = `-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACDk8R4KXGgDa5H2r8HrqW1klShoSISV20sLiXZPZPfeLwAAAJCIan+LiGp/
iwAAAAtzc2gtZWQyNTUxOQAAACDk8R4KXGgDa5H2r8HrqW1klShoSISV20sLiXZPZPfeLw
AAAEDhnul+q0TNTgrO9kfmGsFhtn/rGRIrmhFostjem/QlZuTxHgpcaANrkfavweupbWSV
KGhIhJXbSwuJdk9k994vAAAADHdkZEBjbWlpLmNvbQE=
-----END OPENSSH PRIVATE KEY-----
`
// Ed25519PublicKey is the OpenSSH public key matching Ed25519PrivateKey
// (comment field "wdd@cmii.com"); presumably appended to authorized_keys by
// the installer — verify against the call sites.
var Ed25519PublicKey = `ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOTxHgpcaANrkfavweupbWSVKGhIhJXbSwuJdk9k994v wdd@cmii.com
`
// DefaultSshdConfig is the complete sshd_config content that
// AgentOsOperator.ModifySSHConfigBastion writes over /etc/ssh/sshd_config
// (the original file is backed up to /etc/ssh/sshd_config_wdd_back first)
// before restarting the sshd service.
//
// It listens on ports 22 and 22333 and deliberately relaxes hardening for
// bastion use: PasswordAuthentication yes, PermitRootLogin yes,
// StrictModes no, and agent/TCP/X11 forwarding enabled. Keepalive is tuned
// via ClientAliveInterval 30 / ClientAliveCountMax 60.
//
// NOTE(review): ChallengeResponseAuthentication was renamed
// KbdInteractiveAuthentication and deprecated in newer OpenSSH releases,
// and the Subsystem sftp path varies by distro — confirm on target systems.
var DefaultSshdConfig = `
# OCTOPUS AGENT DEFAULT SSHD CONFIG - WDD
# This is the sshd server system-wide configuration file. See
# sshd_config(5) for more information.
# This sshd was compiled with PATH=/usr/bin:/bin:/usr/sbin:/sbin
# The strategy used for options in the default sshd_config shipped with
# OpenSSH is to specify options with their default value where
# possible, but leave them commented. Uncommented options override the
# default value.
#Include /etc/ssh/sshd_config.d/*.conf
Port 22
Port 22333
AddressFamily any
ListenAddress 0.0.0.0
ListenAddress ::
#HostKey /etc/ssh/ssh_host_rsa_key
#HostKey /etc/ssh/ssh_host_ecdsa_key
#HostKey /etc/ssh/ssh_host_ed25519_key
# Ciphers and keying
#RekeyLimit default none
# Logging
#SyslogFacility AUTH
#LogLevel INFO
# Authentication:
#LoginGraceTime 2m
#PermitRootLogin prohibit-password
#StrictModes yes
#MaxAuthTries 6
#MaxSessions 10
#PubkeyAuthentication yes
# Expect .ssh/authorized_keys2 to be disregarded by default in future.
#AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2
#AuthorizedPrincipalsFile none
#AuthorizedKeysCommand none
#AuthorizedKeysCommandUser nobody
# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
#HostbasedAuthentication no
# Change to yes if you don't trust ~/.ssh/known_hosts for
# HostbasedAuthentication
#IgnoreUserKnownHosts no
# Don't read the user's ~/.rhosts and ~/.shosts files
#IgnoreRhosts yes
# To disable tunneled clear text passwords, change to no here!
#PasswordAuthentication yes
PermitEmptyPasswords no
# Change to yes to enable challenge-response passwords (beware issues with
# some PAM modules and threads)
ChallengeResponseAuthentication no
# Kerberos options
#KerberosAuthentication no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
#KerberosGetAFSToken no
# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes
#GSSAPIStrictAcceptorCheck yes
#GSSAPIKeyExchange no
# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the ChallengeResponseAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via ChallengeResponseAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and ChallengeResponseAuthentication to 'no'.
UsePAM yes
AllowAgentForwarding yes
AllowTcpForwarding yes
#GatewayPorts no
X11Forwarding yes
#X11DisplayOffset 10
#X11UseLocalhost yes
#PermitTTY yes
PrintMotd no
#PrintLastLog yes
TCPKeepAlive yes
#PermitUserEnvironment no
#Compression delayed
#ClientAliveInterval 0
#ClientAliveCountMax 3
#UseDNS no
#PidFile /var/run/sshd.pid
#MaxStartups 10:30:100
#PermitTunnel no
#ChrootDirectory none
#VersionAddendum none
# no default banner path
#Banner none
# Allow client to pass locale environment variables
AcceptEnv LANG LC_*
# override default of no subsystems
Subsystem sftp /usr/lib/openssh/sftp-server
# Example of overriding settings on a per-user basis
#Match User anoncvs
# X11Forwarding no
# AllowTcpForwarding no
# PermitTTY no
# ForceCommand cvs server
PasswordAuthentication yes
PermitRootLogin yes
StrictModes no
ClientAliveInterval 30
ClientAliveCountMax 60
`
// SysctlConfig is the kernel-parameter file content the agent applies:
// IPv4 forwarding on, IPv6 off, plus TCP tuning for high connection churn.
// The key=value lines are unchanged from the original; only the in-file
// comments were corrected (they were all copy-pasted as "enable IPv4
// connection tracking" regardless of the key they described).
//
// NOTE(review): net.ipv4.tcp_tw_recycle was removed in Linux 4.12, and
// net.ipv4.tcp_synflood_protect does not appear to be a standard sysctl
// key — `sysctl -p` will warn/fail on those lines on modern kernels.
// Confirm the target kernel versions and consider dropping both.
var SysctlConfig = `
# 开启 IPv4 路由转发
net.ipv4.ip_forward = 1
# 禁用 IPv6
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
# 开启 IPv4 转发
net.ipv4.conf.all.forwarding = 1
net.ipv4.conf.default.forwarding = 1
# 开启 SYN Cookies, 防护 SYN Flood
net.ipv4.tcp_syncookies = 1
# 快速回收 TIME_WAIT 连接 (注意: Linux 4.12+ 已移除该参数)
net.ipv4.tcp_tw_recycle = 1
# 允许复用 TIME_WAIT 连接
net.ipv4.tcp_tw_reuse = 1
# FIN_WAIT_2 状态超时时间(秒)
net.ipv4.tcp_fin_timeout = 30
# TCP keepalive 首次探测前的空闲时间(秒)
net.ipv4.tcp_keepalive_time = 1200
# 本地可用端口范围
net.ipv4.ip_local_port_range = 1024 65535
# SYN 半连接队列最大长度
net.ipv4.tcp_max_syn_backlog = 8192
# TIME_WAIT 连接最大数量
net.ipv4.tcp_max_tw_buckets = 5000
# 孤儿连接最大数量
net.ipv4.tcp_max_orphans = 32768
# SYN+ACK 重试次数
net.ipv4.tcp_synack_retries = 2
# SYN 重试次数
net.ipv4.tcp_syn_retries = 2
# SYN Flood 防护阈值 (注意: 非标准内核参数, 待确认)
net.ipv4.tcp_synflood_protect = 1000
# 开启 TCP 时间戳
net.ipv4.tcp_timestamps = 1
# 开启 TCP 窗口缩放
net.ipv4.tcp_window_scaling = 1
# TCP 接收缓冲区大小 (min default max)
net.ipv4.tcp_rmem = 4096 87380 4194304
`

View File

@@ -1,71 +0,0 @@
package beans
// Ed25519PrivateKey is an embedded OpenSSH ed25519 private key (pairs with
// Ed25519PublicKey) used by the agent for SSH provisioning.
// NOTE(review): a private key checked into source control is shared by
// every checkout — confirm this is acceptable for this deployment model.
var Ed25519PrivateKey = `-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACDk8R4KXGgDa5H2r8HrqW1klShoSISV20sLiXZPZPfeLwAAAJCIan+LiGp/
iwAAAAtzc2gtZWQyNTUxOQAAACDk8R4KXGgDa5H2r8HrqW1klShoSISV20sLiXZPZPfeLw
AAAEDhnul+q0TNTgrO9kfmGsFhtn/rGRIrmhFostjem/QlZuTxHgpcaANrkfavweupbWSV
KGhIhJXbSwuJdk9k994vAAAADHdkZEBjbWlpLmNvbQE=
-----END OPENSSH PRIVATE KEY-----
`
// Ed25519PublicKey is the OpenSSH public key matching Ed25519PrivateKey
// (comment field "wdd@cmii.com").
var Ed25519PublicKey = `ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOTxHgpcaANrkfavweupbWSVKGhIhJXbSwuJdk9k994v wdd@cmii.com
`
// SysctlConfig is the kernel-parameter file content the agent applies:
// IPv4 forwarding on, IPv6 off, plus TCP tuning parameters.
// NOTE(review): most in-file comments below were copy-pasted as
// "开启 IPv4 连接跟踪" and do not describe the key they precede; also
// net.ipv4.tcp_tw_recycle was removed in Linux 4.12 and
// net.ipv4.tcp_synflood_protect does not appear to be a standard sysctl
// key — `sysctl -p` will warn/fail on those lines on modern kernels.
var SysctlConfig = `
# 开启 IPv4 路由转发
net.ipv4.ip_forward = 1
# 禁用 IPv6
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
# 开启 IPv4 转发
net.ipv4.conf.all.forwarding = 1
net.ipv4.conf.default.forwarding = 1
# 开启 IPv4 连接跟踪
net.ipv4.tcp_syncookies = 1
# 开启 IPv4 连接跟踪
net.ipv4.tcp_tw_recycle = 1
# 开启 IPv4 连接跟踪
net.ipv4.tcp_tw_reuse = 1
# 开启 IPv4 连接跟踪
net.ipv4.tcp_fin_timeout = 30
# 开启 IPv4 连接跟踪
net.ipv4.tcp_keepalive_time = 1200
# 开启 IPv4 连接跟踪
net.ipv4.ip_local_port_range = 1024 65535
# 开启 IPv4 连接跟踪
net.ipv4.tcp_max_syn_backlog = 8192
# 开启 IPv4 连接跟踪
net.ipv4.tcp_max_tw_buckets = 5000
# 开启 IPv4 连接跟踪
net.ipv4.tcp_max_orphans = 32768
# 开启 IPv4 连接跟踪
net.ipv4.tcp_synack_retries = 2
# 开启 IPv4 连接跟踪
net.ipv4.tcp_syn_retries = 2
# 开启 IPv4 连接跟踪
net.ipv4.tcp_synflood_protect = 1000
# 开启 IPv4 连接跟踪
net.ipv4.tcp_timestamps = 1
# 开启 IPv4 连接跟踪
net.ipv4.tcp_window_scaling = 1
# 开启 IPv4 连接跟踪
net.ipv4.tcp_rmem = 4096 87380 4194304
`