[ Cmii ] [ Octopus ] - project a lot
@@ -244,10 +244,13 @@ func UniformAgentServerInfo(agentServerInfo *register.AgentServerInfo) {
func BuildAgentOsOperator(agentInfo *status.AgentInfo, agentServerInfo *register.AgentServerInfo) {

    // 2023-08-04: pass through some key information
    ossOfflinePrefix := "http://bastion.io"
    if g.G.AgentConfig != nil {
        ossOfflinePrefix = g.G.AgentConfig.GetString("octopus.agent.executor.ossOfflinePrefix")
        if !strings.HasSuffix(ossOfflinePrefix, "/") {
            ossOfflinePrefix += "/"
        }
    }

    // call the init exec function
    agentOsOperator := executor.BuildAgentOsOperator(agentInfo, ossOfflinePrefix)

55 agent-go/BastionInitializaion.go Normal file
@@ -0,0 +1,55 @@
package main

import (
    "wdd.io/agent-go/executor"
    "wdd.io/agent-go/register"
    "wdd.io/agent-go/status"
)

func BastionModeInit() {

    // Build For Operator
    agentServerInfo := &register.AgentServerInfo{
        ServerName:      "BastionSingle",
        ServerIPPbV4:    "127.0.0.1",
        ServerIPInV4:    "127.0.0.1",
        ServerIPPbV6:    "",
        ServerIPInV6:    "",
        Location:        "Bastion",
        Provider:        "Bastion",
        ManagePort:      "22",
        CPUCore:         "",
        CPUBrand:        "",
        OSInfo:          "",
        OSKernelInfo:    "",
        TCPControl:      "",
        Virtualization:  "",
        Platform:        "",
        PlatformFamily:  "",
        PlatformVersion: "",
        KernelVersion:   "",
        KernelArch:      "",
        IoSpeed:         "",
        MemoryTotal:     "",
        DiskTotal:       "",
        DiskUsage:       "",
        Comment:         "",
        MachineID:       "",
        AgentVersion:    "",
        TopicName:       "BastionNode",
    }

    // re-get agentInfo from status module
    agentInfo := status.ReportAgentInfo()
    refreshAgentInfoByStatusInfo(agentInfo, agentServerInfo)
    BuildAgentOsOperator(agentInfo, agentServerInfo)

    // install docker
    agentOsOperator := executor.AgentOsOperatorCache
    // boot up minio & rabbitmq
    agentOsOperator.InstallDockerFromLocalExec(nil)
    agentOsOperator.InstallDockerComposeFromLocalExec()

    // build for socks server

}
BIN agent-go/bastion_mode_init/amd64/socks5_linux_amd64 Normal file
Binary file not shown.
BIN agent-go/bastion_mode_init/arm64/socks5_linux_arm64 Normal file
Binary file not shown.
15 agent-go/bastion_mode_init/bastion_mode.sh Normal file
@@ -0,0 +1,15 @@
#!/bin/bash

# the part that needs to be modified
# the part that needs to be modified

# Socks5
install_socks5() {

}
# MINIO installation
install_minio_server() {

}
##
# RabbitMQ installation and initialization
@@ -796,7 +796,7 @@ func (op *AgentOsOperator) deployIngress(funcArgs []string) (bool, []string) {
    }
    parseIP = net.ParseIP(funcArgs[3])
    if parseIP == nil {
        return false, append(result, "ip args error !")
        return false, append(result, "替换A1C1IP信息 ip args error !")
    }
    if !BasicReplace(k8sIngressYamlFile, "A1C1IP", funcArgs[3]) {
        result = append(result, "替换A1C1IP信息")

@@ -935,6 +935,12 @@ func (op *AgentOsOperator) installDockerOfflineExec(args []string) (bool, []stri
        "[installDockerOfflineExec] - docker offline installation success!",
    }
}
func (op *AgentOsOperator) InstallDockerFromLocalExec(args []string) (bool, []string) {

    return true, []string{
        "[installDockerFromLocalExec] - docker offline installation from local success!",
    }
}

func (op *AgentOsOperator) removeDockerCompose() [][]string {

@@ -1021,6 +1027,13 @@ func (op *AgentOsOperator) installDockerComposeExec() (bool, []string) {
    log.Info("docker-compose安装成功!")
    return true, []string{"docker-compose安装成功!"}
}

func (op *AgentOsOperator) InstallDockerComposeFromLocalExec() (bool, []string) {
    return true, []string{
        "[installDockerComposeFromLocalExec] - docker-compose offline installation from local success!",
    }
}

func (op *AgentOsOperator) installHelm() [][]string {
    installHelmFunc := [][]string{
        {

@@ -15,9 +15,15 @@ func main() {
    // parse command-line flags
    var version string
    var agentServerInfoConf string
    var mode string
    flag.StringVar(&version, "version", "", "config file version")
    flag.StringVar(&mode, "mode", "agent", "agent run mode")
    flag.StringVar(&agentServerInfoConf, "agentServerInfoConf", "", "agent server info conf file")
    flag.Parse()
    if mode == "bastion" {
        BastionModeInit()
        return
    }
    // read the config file for the given version
    filename := fmt.Sprintf("octopus-agent-%s.yaml", version)
    println("config file name is => " + filename)

@@ -6,6 +6,7 @@ import (
    "time"
    "wdd.io/agent-go/executor"
    "wdd.io/agent-go/utils"
    image2 "wdd.io/cmii_operator/image"
)

var CmiiOperator = CmiiK8sOperator{}
@@ -250,18 +251,13 @@ func RestartCmiiFrontendDeployment(cmiiEnv string) {

func UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag string) bool {

    deployment := CmiiOperator.DeploymentExist(cmiiEnv, appName)
    if deployment == nil {
        log.ErrorF("[UpdateCmiiDeploymentImageTag] - [%s] [%s] not exists !", cmiiEnv, appName)
    cmiiDeploymentInterface := CmiiOperator.DeploymentOneInterface(cmiiEnv, appName)
    if cmiiDeploymentInterface == nil {
        return false
    }

    deploymentInterface := CmiiDeploymentInterface{}
    cmiiDeploymentInterface := deploymentInterface.Convert(*deployment)
    // check if need to update
    if cmiiDeploymentInterface.ImageTag == newTag {
        log.DebugF("[UpdateCmiiDeploymentImageTag] - [%s] [%s] image tag are the same ! no need to update !", cmiiEnv, appName)

        // restart
        if CmiiOperator.DeploymentRestart(cmiiEnv, appName) {
            return true
@@ -270,8 +266,8 @@ func UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag string) bool {
        }
    }

    content := executor.BasicWordSpaceCompletion(utils.TimeSplitFormatString()+" "+cmiiDeploymentInterface.Namespace, 45)
    content = executor.BasicWordSpaceCompletion(content+cmiiDeploymentInterface.Name, 85)
    content := executor.BasicWordSpaceCompletion(utils.TimeSplitFormatString()+" "+cmiiDeploymentInterface.Namespace, 35)
    content = executor.BasicWordSpaceCompletion(content+cmiiDeploymentInterface.Name, 75)
    content = executor.BasicWordSpaceCompletion(content+cmiiDeploymentInterface.ImageTag, 105)
    content = content + newTag + "\n"

@@ -296,7 +292,7 @@ func UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag string) bool {
    }

    // log
    log.InfoF("[UpdateCmiiDeploymentImageTag] - real image tag are [%s] \n update tag [%s] success ! ", deploy.Image, content)
    //log.InfoF("[UpdateCmiiDeploymentImageTag] - real image tag are [%s] update tag [%s] success ! ", deploy.Image, content)
    return true
}

@@ -305,7 +301,9 @@ func UpdateCmiiImageTagFromNameTagMap(cmiiEnv string, nameTagMap map[string]stri
    result = make(map[string]string, len(nameTagMap))
    for appName, newTag := range nameTagMap {
        if AppNameBelongsToCmiiImage(appName) {

            if UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag) {
                log.InfoF("[UpdateCmiiImageTagFromNameTagMap] - %s %s to %s", cmiiEnv, appName, newTag)
                result[appName] = newTag
            } else {
                result[appName] = "false"
@@ -407,13 +405,14 @@ func BackupAllDeploymentFromEnv(cmiiEnv string) bool {
    return true
}

func BackupAllCmiiDeploymentToMap(cmiiEnv string) (backendMap, frontendMap map[string]string) {
func BackupAllCmiiDeploymentToMap(cmiiEnv string) (backendMap, frontendMap, srsMap map[string]string) {

    allInterface := CmiiOperator.DeploymentAllInterface(cmiiEnv)
    allInterface = FilterAllCmiiAppSoft(allInterface)

    backendMap = make(map[string]string, len(allInterface))
    frontendMap = make(map[string]string, len(allInterface))
    srsMap = make(map[string]string, len(allInterface))

    for _, deploymentInterface := range allInterface {
        if strings.Contains(deploymentInterface.Name, "platform") {
@@ -423,7 +422,33 @@ func BackupAllCmiiDeploymentToMap(cmiiEnv string) (backendMap, frontendMap map[s
        }
    }

    return backendMap, frontendMap
    // add srs part
    for key, value := range CmiiSrsAppMap {
        var app *CmiiDeploymentInterface
        if strings.Contains(value, "deployment") {
            app = CmiiOperator.DeploymentOneInterface(cmiiEnv, key)
            if app != nil {
                for _, image := range app.ContainerImageMap {
                    split := strings.Split(image, ":")
                    if strings.Contains(split[0], image2.CmiiHarborPrefix) {
                        split[0] = strings.Split(split[0], image2.CmiiHarborPrefix)[1]
                    }
                    srsMap[split[0]] = split[1]
                }
            }
        } else if strings.Contains(value, "state") {
            app = CmiiOperator.StatefulSetOneInterface(cmiiEnv, key)
            if app != nil {
                for _, image := range app.ContainerImageMap {
                    split := strings.Split(image, ":")
                    split[0], _ = strings.CutPrefix(split[0], image2.CmiiHarborPrefix)
                    srsMap[split[0]] = split[1]
                }
            }
        }
    }

    return backendMap, frontendMap, srsMap
}

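To make the srsMap bookkeeping above concrete, here is a small illustration (not part of the commit) of how one container image reference flows through the split/CutPrefix logic, using the CmiiHarborPrefix defined later in image.go:

// illustration only — derived from the loop above, with CmiiHarborPrefix = "harbor.cdcyy.com.cn/cmii/"
// image    = "harbor.cdcyy.com.cn/cmii/ossrs/srs:v4.0.136"
// split    = ["harbor.cdcyy.com.cn/cmii/ossrs/srs", "v4.0.136"]   // strings.Split(image, ":")
// split[0] = "ossrs/srs"                                          // after strings.CutPrefix(..., CmiiHarborPrefix)
// result   : srsMap["ossrs/srs"] = "v4.0.136"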
func BackUpAllCmiiAppImageNameFromEnv(cmiiEnv string) {
@@ -50,14 +50,6 @@ func TestFindCmiiMiddlewarePodInterface(t *testing.T) {
    }
}

func TestBackupAllCmiiDeploymentToMap(t *testing.T) {
    backendMap, frontendMap := BackupAllCmiiDeploymentToMap(demo)

    utils.BeautifulPrint(backendMap)
    utils.BeautifulPrint(frontendMap)

}

func TestRollBackCmiiDeploymentFromUpdateLog(t *testing.T) {
    updateLog := RollBackCmiiDeploymentFromUpdateLog("2024-01-10-14-37-07 uavcloud-devflight cmii-uav-depotautoreturn 12345678 123sdsa45678")

@@ -149,6 +141,15 @@ func TestRestartDeploymentFromList(t *testing.T) {

}

func TestBackupAllCmiiDeploymentToMap(t *testing.T) {

    backendMap, frontendMap, srsMap := BackupAllCmiiDeploymentToMap(demo)

    utils.BeautifulPrint(backendMap)
    utils.BeautifulPrint(frontendMap)
    utils.BeautifulPrint(srsMap)
}

func TestUpdateCmiiImageTagFromNameTagMap(t *testing.T) {

    cmii530BackendMap := map[string]string{
@@ -227,14 +228,16 @@ func TestBackupAllDeploymentFromEnv(t *testing.T) {
}

func TestBackUpAllCmiiAppImageNameFromEnv(t *testing.T) {

    BackUpAllCmiiAppImageNameFromEnv(demo)

}

func TestUpdateCmiiDeploymentImageTag(t *testing.T) {

    cmiiEnv := demo
    appName := "cmii-uav-device"
    newTag := "5.4.0-26905"
    appName := "cmii-uav-platform"
    newTag := "5.4.0"

    tag := UpdateCmiiDeploymentImageTag(cmiiEnv, appName, newTag)
    assert.Equal(t, tag, true, "update image tag failed !")

@@ -3,8 +3,10 @@ package cmii_operator
import (
    "errors"
    "os"
    "strings"
    "wdd.io/agent-go/executor"
    "wdd.io/agent-go/utils"
    "wdd.io/cmii_operator/image"
)

const OfflineImageGzipFolderPrefix = "/root/octopus_image/"
@@ -15,7 +17,7 @@ const DirectPushDeployHarborHost = "36.134.28.60"
func FetchDemoImages(projectName string, gzipSplit bool) (errorPullImageList, errorGzipImageList []string) {

    // generate a project folder
    err := os.Mkdir(OfflineImageGzipFolderPrefix+projectName, os.ModeDir)
    err := os.MkdirAll(OfflineImageGzipFolderPrefix+projectName, os.ModeDir)
    if err != nil {
        if !errors.Is(err, os.ErrExist) {
            log.ErrorF("[FetchDemoImages] - create folder of %s error %s", OfflineImageGzipFolderPrefix+projectName, err.Error())
@@ -24,16 +26,19 @@ func FetchDemoImages(projectName string, gzipSplit bool) (errorPullImageList, er
    }

    // get demo image version map
    backendMap, frontendMap := BackupAllCmiiDeploymentToMap(demo)
    backendMap, frontendMap, srsMap := BackupAllCmiiDeploymentToMap(demo)

    utils.BeautifulPrint(backendMap)
    utils.BeautifulPrint(frontendMap)
    utils.BeautifulPrint(srsMap)

    // save map to file
    backendMapFile := OfflineImageGzipFolderPrefix + projectName + "-backend-app.json"
    frontendMapFile := OfflineImageGzipFolderPrefix + projectName + "-frontend-app.json"
    srsMapFile := OfflineImageGzipFolderPrefix + projectName + "-srs-app.json"
    _ = os.Remove(backendMapFile)
    _ = os.Remove(frontendMapFile)
    _ = os.Remove(srsMapFile)

    executor.BasicAppendContentToFile(
        utils.BeautifulPrintToString(backendMap),
@@ -43,21 +48,31 @@ func FetchDemoImages(projectName string, gzipSplit bool) (errorPullImageList, er
        utils.BeautifulPrintToString(frontendMap),
        frontendMapFile,
    )
    executor.BasicAppendContentToFile(
        utils.BeautifulPrintToString(srsMap),
        srsMapFile,
    )

    // download image
    backendPull := ImagePullFromCmiiHarborByMap(backendMap, true)
    frontendPull := ImagePullFromCmiiHarborByMap(frontendMap, true)
    backendPull := image.ImagePullFromCmiiHarborByMap(backendMap, true)
    frontendPull := image.ImagePullFromCmiiHarborByMap(frontendMap, true)
    srsPull := image.ImagePullFromCmiiHarborByMap(srsMap, true)

    // compress image
    if gzipSplit {
        for image, tag := range backendMap {
            if !ImageSaveToTarGZ(image+":"+tag, OfflineImageGzipFolderPrefix+projectName+"/app/") {
                errorGzipImageList = append(errorGzipImageList, CmiiHarborPrefix+image+":"+tag)
        for image_name, tag := range backendMap {
            if !image.SaveToTarGZ(image_name+":"+tag, OfflineImageGzipFolderPrefix+projectName+"/app/") {
                errorGzipImageList = append(errorGzipImageList, image.CmiiHarborPrefix+image_name+":"+tag)
            }
        }
        for image, tag := range frontendMap {
            if !ImageSaveToTarGZ(image+":"+tag, OfflineImageGzipFolderPrefix+projectName+"/app/") {
                errorGzipImageList = append(errorGzipImageList, CmiiHarborPrefix+image+":"+tag)
        for image_name, tag := range frontendMap {
            if !image.SaveToTarGZ(image_name+":"+tag, OfflineImageGzipFolderPrefix+projectName+"/app/") {
                errorGzipImageList = append(errorGzipImageList, image.CmiiHarborPrefix+image_name+":"+tag)
            }
        }
        for image_name, tag := range srsMap {
            if !image.SaveToTarGZ(image_name+":"+tag, OfflineImageGzipFolderPrefix+projectName+"/app/") {
                errorGzipImageList = append(errorGzipImageList, image.CmiiHarborPrefix+image_name+":"+tag)
            }
        }
    }
@@ -67,21 +82,73 @@ func FetchDemoImages(projectName string, gzipSplit bool) (errorPullImageList, er

    errorPullImageList = append(errorPullImageList, backendPull...)
    errorPullImageList = append(errorPullImageList, frontendPull...)
    errorPullImageList = append(errorPullImageList, srsPull...)

    return errorPullImageList, errorGzipImageList
}

func FetchVersionImages(cmiiVersion string, gzipSplit bool) (errorPullImageList, errorGzipImageList []string) {

    // generate a project folder
    err := os.MkdirAll(OfflineImageGzipFolderPrefix+cmiiVersion, os.ModeDir)
    if err != nil {
        if !errors.Is(err, os.ErrExist) {
            log.ErrorF("[FetchDemoImages] - create folder of %s error %s", OfflineImageGzipFolderPrefix+cmiiVersion, err.Error())
            return errorPullImageList, errorGzipImageList
        }
    }

    backendMap := CmiiBackendAppMap
    frontendMap := CmiiFrontendAppMap

    for app, _ := range backendMap {
        backendMap[app] = cmiiVersion
    }
    for app, _ := range frontendMap {
        frontendMap[app] = cmiiVersion
    }

    var allCmiiImageName []string

    allCmiiImageName = append(allCmiiImageName, image.ConvertCMiiImageMapToList(backendMap)...)
    allCmiiImageName = append(allCmiiImageName, image.ConvertCMiiImageMapToList(frontendMap)...)

    for key, value := range CmiiSrsAppMap {
        var app *CmiiDeploymentInterface
        if strings.Contains(value, "deployment") {
            app = CmiiOperator.DeploymentOneInterface(demo, key)
            if app != nil {
                allCmiiImageName = append(allCmiiImageName, app.Image)
            }
        } else if strings.Contains(value, "state") {
            app = CmiiOperator.StatefulSetOneInterface(demo, key)
            if app != nil {
                for _, imageName := range app.ContainerImageMap {
                    allCmiiImageName = append(allCmiiImageName, imageName)
                }
            }
        }
    }

    utils.BeautifulPrint(allCmiiImageName)

    // do work
    errorPullImageList, errorGzipImageList = image.PullFromListAndCompressSplit(allCmiiImageName, OfflineImageGzipFolderPrefix+cmiiVersion)

    return errorPullImageList, errorGzipImageList
}

func FetchDependencyRepos(gzipSplit bool) (errorPullImageList, errorGzipImageList []string) {
    err := os.Mkdir(OfflineImageGzipFolderPrefix, os.ModeDir)
    err := os.MkdirAll(OfflineImageGzipFolderPrefix, os.ModeDir)
    if err != nil {
        if !errors.Is(err, os.ErrExist) {
            log.ErrorF("[FetchDependencyRepos] - create folder of %s error %s", OfflineImageGzipFolderPrefix, err.Error())
        }
    }

    errorPullImageList, errorGzipImageList = ImagePullFromListAndCompressSplit(MiddlewareAmd64, OfflineImageGzipFolderPrefix+"middle/")
    errorPullImageList, errorGzipImageList = image.PullFromListAndCompressSplit(image.MiddlewareAmd64, OfflineImageGzipFolderPrefix+"middle/")

    pull, gzipImageList := ImagePullFromListAndCompressSplit(Rancher1204Amd64, OfflineImageGzipFolderPrefix+"rke/")
    pull, gzipImageList := image.PullFromListAndCompressSplit(image.Rancher1204Amd64, OfflineImageGzipFolderPrefix+"rke/")

    return append(errorPullImageList, pull...), append(errorGzipImageList, gzipImageList...)

@@ -91,9 +158,9 @@ func LoadSplitGzipImageToTargetHarbor(projectName, targetHarborHost string) (err

    // list folder
    projectGzipFolder := OfflineImageGzipFolderPrefix + projectName
    errorLoadImageNameList = append(errorLoadImageNameList, ImageLoadFromFolderPath(projectGzipFolder)...)
    errorLoadImageNameList = append(errorLoadImageNameList, image.ImageLoadFromFolderPath(projectGzipFolder)...)
    // read from json
    errorPushImageNameList = append(errorPushImageNameList, ImageTagFromListAndPushToCHarbor(Cmii520DemoImageList, targetHarborHost)...)
    errorPushImageNameList = append(errorPushImageNameList, image.TagFromListAndPushToCHarbor(image.Cmii520DemoImageList, targetHarborHost)...)

    // re-tag
    // push
@@ -111,8 +178,8 @@ func LoadSplitDepGzipImageToTargetHarbor(targetHarborHost string) (errorLoadImag
    //errorLoadImageNameList = append(errorLoadImageNameList, ImageLoadFromFolderPath(middle)...)
    //errorLoadImageNameList = append(errorLoadImageNameList, ImageLoadFromFolderPath(rke)...)

    errorPushImageNameList = append(errorPushImageNameList, ImageTagFromListAndPushToCHarbor(MiddlewareAmd64, targetHarborHost)...)
    errorPushImageNameList = append(errorPushImageNameList, ImageTagFromListAndPushToCHarbor(Rancher1204Amd64, targetHarborHost)...)
    errorPushImageNameList = append(errorPushImageNameList, image.TagFromListAndPushToCHarbor(image.MiddlewareAmd64, targetHarborHost)...)
    errorPushImageNameList = append(errorPushImageNameList, image.TagFromListAndPushToCHarbor(image.Rancher1204Amd64, targetHarborHost)...)

    return errorLoadImageNameList, errorPushImageNameList


@@ -7,13 +7,20 @@ import (

func TestFetchDemoImages(t *testing.T) {

    errorPullImageList, errorGzipImageList := FetchDemoImages("cqga", true)
    errorPullImageList, errorGzipImageList := FetchDemoImages("shls", true)

    utils.BeautifulPrintListWithTitle(errorPullImageList, "cmii errorPullImageList")
    utils.BeautifulPrintListWithTitle(errorGzipImageList, "cmii errorGzipImageList")

}

func TestFetchVersionImages(t *testing.T) {
    errorPullImageList, errorGzipImageList := FetchVersionImages("5.4.0", true)

    utils.BeautifulPrintListWithTitle(errorPullImageList, "cmii errorPullImageList")
    utils.BeautifulPrintListWithTitle(errorGzipImageList, "cmii errorGzipImageList")
}

func TestFetchDependencyRepos(t *testing.T) {

    errorPullImageList, errorGzipImageList := FetchDependencyRepos(true)

@@ -76,7 +76,12 @@ func (deploy CmiiDeploymentInterface) Convert(deployment v1.Deployment) CmiiDepl
    containerImageMap[container.Name] = container.Image
    deploy.Image = container.Image
    deploy.ContainerName = container.Name

    if strings.Contains(container.Image, ":8033") {
        deploy.ImageTag = strings.Split(container.Image, ":")[2]
    } else {
        deploy.ImageTag = strings.Split(container.Image, ":")[1]
    }

    for _, envVar := range container.Env {
        if strings.HasPrefix(envVar.Name, "GIT_BRANCH") {

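The ":8033" check above exists because the cluster mixes two registry reference shapes; a quick sketch (not part of the commit) of why the tag index differs when splitting on ":":

// harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:4.1.6-beta -> strings.Split(img, ":") = [repo, tag]; tag is index 1
// 192.168.6.6:8033/rancher/k8s-dns-sidecar:v1.0.2      -> the registry port ":8033" adds a colon; tag moves to index 2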
@@ -17,6 +17,7 @@ import (
    "time"
    "wdd.io/agent-go/logger"
    "wdd.io/agent-go/utils"
    "wdd.io/cmii_operator/image"
)

var log = logger.Log
@@ -397,10 +398,19 @@ func (op *CmiiK8sOperator) DeploymentUpdateTag(cmiiEnv, appName, newTag string)
    if len(containers) == 1 {
        // only update this kind
        container := containers[0]
        split := strings.Split(container.Image, ":")

        oldName := container.Image

        split := strings.Split(container.Image, ":")
        if strings.HasPrefix(container.Image, image.CmiiHarborPrefix) {
            // harbor
            container.Image = split[0] + ":" + newTag
            log.InfoF("[DeploymentUpdateTag] - update [%s] [%s] from [%s] to [%s]", op.CurrentNamespace, appName, split[1], container.Image)
        } else if strings.Contains(container.Image, "8033") {
            // 192.168.6.6:8033/rancher/k8s-dns-sidecar:v1.0.2
            container.Image = split[0] + ":" + split[1] + ":" + newTag
        }

        log.DebugF("[DeploymentUpdateTag] - update [%s] [%s] from [%s] to [%s]", op.CurrentNamespace, appName, oldName, container.Image)

        // re assign
        deployment.Spec.Template.Spec.Containers[0] = container
@@ -411,6 +421,9 @@ func (op *CmiiK8sOperator) DeploymentUpdateTag(cmiiEnv, appName, newTag string)
            log.ErrorF("[DeploymentUpdateTag] - update [%s] [%s] from [%s] to [%s] error ! %s", op.CurrentNamespace, appName, split[1], container.Image, err.Error())
            return false
        }
    } else if len(containers) == 2 {
        log.ErrorF("[DeploymentUpdateTag] - cant update app with 2 containers !")
        return false
    }

    return true

9 cmii_operator/actual_project/octopus-agent-run.txt Normal file
@@ -0,0 +1,9 @@


# internet
bash <(curl -sL http://42.192.52.227:9000/octopus/init-script-wdd.sh) --url http://42.192.52.227:9000/octopus --agent-install --offline

# no internet

export offline_minio=103.0.180.82
bash <(curl -sL http://${offline_minio}:9000/octopus/init-script-wdd.sh) --url http://${offline_minio}:9000/octopus --agent-install --offline
Binary file not shown.
Binary file not shown.
@@ -0,0 +1 @@
#!/bin/bash
@@ -0,0 +1,28 @@
Set-ExecutionPolicy RemoteSigned -Scope CurrentUser

$basePath = $PWD

Write-Host "Current Running Path is $basePath"
# This is a PowerShell script to run port_win64.exe with admin privileges and keep running in the background

Write-Host "Start the port forwarding !"
Start-Process -FilePath "$basePath\port_win64.exe" -ArgumentList "udp listen:0.0.0.0:53 conn:223.5.5.5:53" -Verb RunAs -WindowStyle Hidden
Start-Process -FilePath "$basePath\port_win64.exe" -ArgumentList "tcp listen:0.0.0.0:80 conn:42.192.52.227:80" -Verb RunAs -WindowStyle Hidden
Start-Process -FilePath "$basePath\port_win64.exe" -ArgumentList "tcp listen:0.0.0.0:9000 conn:42.192.52.227:9000" -Verb RunAs -WindowStyle Hidden
Start-Process -FilePath "$basePath\port_win64.exe" -ArgumentList "tcp listen:0.0.0.0:20672 conn:42.192.52.227:20672" -Verb RunAs -WindowStyle Hidden
Start-Process -FilePath "$basePath\port_win64.exe" -ArgumentList "tcp listen:0.0.0.0:20678 conn:42.192.52.227:20678" -Verb RunAs -WindowStyle Hidden

Write-Host "Start the socks !"
Start-Process -FilePath "$basePath\socks5_win64.exe" -ArgumentList "9997" -Verb RunAs -WindowStyle Hidden
# Keep script running until terminal is closed
Write-Host ""
netstat -ano | findstr 53
Write-Host ""
netstat -ano | findstr 9000
Write-Host ""
netstat -ano | findstr 20672
Write-Host ""
netstat -ano | findstr 20678
Write-Host ""

$null = Read-Host "Press Enter to close this script"
Binary file not shown.
Binary file not shown.
123 cmii_operator/actual_project/zyga/operator.go Normal file
@@ -0,0 +1,123 @@
package main

import (
    "wdd.io/cmii_operator"
)

var realConfig = `apiVersion: v1
kind: Config
clusters:
- cluster:
    api-version: v1
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN3akNDQWFxZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFTTVJBd0RnWURWUVFERXdkcmRXSmwKTFdOaE1CNFhEVEkwTURNd056QTVNamd3TkZvWERUTTBNRE13TlRBNU1qZ3dORm93RWpFUU1BNEdBMVVFQXhNSAphM1ZpWlMxallUQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUwvNENFYy96cnNPCnpzTG9OVlhTQWFLTkpldGs2RlBCbFYvditLcFNOelFOY1FsZ0hSN2NSSWl0c0N2eHBvYUtucFY4VEFLZFJpb3gKTGRPakM4a1E1OUt3cXk5SXU1Wk5LYWpOaDVIZDNCdzlMOHJiUVJoTThwRWp3dzRJTFdhdzNNMlF2NnA2YjdqRgpQN0h1c3VWZW1JVEl4TTl1T3BtQzNVOWZaQzVIbVpKZDdpaEJzaVpMR2lZOGVES2lPbGh6am10amNQWUFiUnE4Cml6UW1zcmdhUityb203YTdBQTdxU3ZTdHlyTmRjbXFBQmRvU3lEUDhaOFBzWlB2djhWSisyOUJ1eEgveVhCLzIKaVBsaG83Yjl4eGduSmJxaURRS0NsbzVjcFBzbWpQQ0JkZmJPVk9ORzhRZzY1UmJPems2TnNXUzNvLzFWVklaSwpqeVMyZjFlcjFBMENBd0VBQWFNak1DRXdEZ1lEVlIwUEFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CCkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQjRNd3ZyWGZOaHBWTVpFNWMyY2gxS05CYStJNGc1Z2tnU1YKajNGN3BRV1FpbXZSb1VDSmhjOEJiZmRIVnN1VzE5NE9FREx6WUtONGxDb20vWUVZNUN3R0lwVjhVRlN0aDJuZQpxcTRhM1crVW4xM2Z1TWdBemxTblFSQ3F0d01UbEh3cnlnaUQ0bE5HMGVEdVdoVlMwdVpZSHFpV0Y2OENUZmp5Cng3UVhsVmNrTU1XZzhoSlIwNG1QV1BhYis5cDd0b3Q1WWZwK0kxOWU5V2dpelJNNCs3TGoxUmpCVGN4WGFaYWgKL3JrMjZzV3JmK0xkcEh6c0U1cFc3LzlKM09MNGdTWFJKb09kclQwK1lsczVIRm83Q1d5TW1tbmVxMlR4Q2tHSwpxTkVzNUUrdDYrYStCL3B0cXZHd3RmbnlKeFV1YkZhY3FJeG1vbGo3UW52OWR1RVRiQkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    server: "https://127.0.0.1:6443"
  name: "rke-cluster"
contexts:
- context:
    cluster: "rke-cluster"
    user: "kube-admin-rke-cluster"
  name: "rke-cluster"
current-context: "rke-cluster"
users:
- name: "kube-admin-rke-cluster"
  user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2VENDQWRHZ0F3SUJBZ0lJZEtaNDNXVVpLOE13RFFZSktvWklodmNOQVFFTEJRQXdFakVRTUE0R0ExVUUKQXhNSGEzVmlaUzFqWVRBZUZ3MHlOREF6TURjd09USTRNRFJhRncwek5EQXpNRFV3T1RVd05EWmFNQzR4RnpBVgpCZ05WQkFvVERuTjVjM1JsYlRwdFlYTjBaWEp6TVJNd0VRWURWUVFERXdwcmRXSmxMV0ZrYldsdU1JSUJJakFOCkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTA0M1hyS215Rkgvemw5SU9ubjJkODN5Mlo2Rm4KMXhiYVZMN09nTXlZelVCS204WFdWY0V5L1RaRTBnV1pJdm9nTmtVOGptd0F6d0dxb2dmeS9nVVk2VWRINkVDQgowcVRMUDFkQTlJSU1XL3c5SlpjUU0wTWw3Qi9NUVNYbWRkRmZhWHk1TjlYYWpoSVB3ZFFKRFNOZ2cwblRKZnYvCmZSaU1PUWhMYTVBUUNHQjFEZ2pjdC8xd1dZSEF4Qks1Rlk0QTh0UTA4SzlxV1ovYnpQWXUzMGlsWjkvTllrcHAKRHVpVUhYZEdEZHAvbUtianl5LzcwVktXUmxDSmlCUWpXajdTZEd5dEZtNTN6YW9CdGh5OFhibFNaVHR4QUx6bgp5UWYweENrZGxZeWFaMFJDOXhvaFF0NzZQNFkzZmhaYlpMaStjV2MwRG1SdlJEN0FhSGREb1EwQ0tRSURBUUFCCm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUhBd0l3RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dFQkFEVlEwbTEvTk0xdHBzTlVhRDhMeGJNRUw3NEVvR2RVWVpoZWtGbWRBQXBISENEUgpiMjNuYzBtN2FIbmdGZEZEQk9lNFBEaDFnKzY0V2JrNjdiUHpXUjJ6NE1kdlhnQW5vdUhjZ2xJMUYxdUpVWVJ2CmZJdmVlem82UkFqYjUrcXB5c1IxbmkwMEtGQjZJQU5oMW9zRElKNUNkTXJma2xxWDQvK0hTbDZ6alJPU2xlYmIKTy9mWFduemt3cGRtNFFPQ2xjRTBHTDlZNHl4Q25nd3VWc3lTMWI0OHpobk5GTDhVUGxpNC9YQVM5cVBVSzdZYwpYYWpHeWs1cFkrRFVhMFN2NDdweVhFUVZNREVzQmQwUGJ6eGk0anp0cHcvQjlQbm5OQVVpN05UMVh1aEFyOUMxCmI0Mjl4UHQySjE2ejZycXp5b3VXUFQ3RHM1WEVTQnM4dDZISFBRcz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMDQzWHJLbXlGSC96bDlJT25uMmQ4M3kyWjZGbjF4YmFWTDdPZ015WXpVQkttOFhXClZjRXkvVFpFMGdXWkl2b2dOa1U4am13QXp3R3FvZ2Z5L2dVWTZVZEg2RUNCMHFUTFAxZEE5SUlNVy93OUpaY1EKTTBNbDdCL01RU1htZGRGZmFYeTVOOVhhamhJUHdkUUpEU05nZzBuVEpmdi9mUmlNT1FoTGE1QVFDR0IxRGdqYwp0LzF3V1lIQXhCSzVGWTRBOHRRMDhLOXFXWi9ielBZdTMwaWxaOS9OWWtwcER1aVVIWGRHRGRwL21LYmp5eS83CjBWS1dSbENKaUJRaldqN1NkR3l0Rm01M3phb0J0aHk4WGJsU1pUdHhBTHpueVFmMHhDa2RsWXlhWjBSQzl4b2gKUXQ3NlA0WTNmaFpiWkxpK2NXYzBEbVJ2UkQ3QWFIZERvUTBDS1FJREFRQUJBb0lCQURLeUpnSDVkSFJYS1MregpwYzh6T1J1MVFoelpZQUg2TnYzaDc2aUwzdjRvcnZoZlUzcWZYckd4UkpLenhydk1CdFlhaDJWMTJrZkJGWHZZCnZkRkR0WEdKcEdDeXZLbVcxaUhxcmVVcUdQNGVGeVVmNjBEdGtYUGhOdGhSMWNWY0ZDbzZPa1I0R2ZTN3ZVenMKbS9LckRLREptekRhRDZLUnRHQ2liVGhzZ0hzUStsOXhQQ3RYYldSVTlIV2cwUTY4Y1l1TVMzRUhyNlJtbHVVdAoxeC8veklLUnNLSksxL2ZpS0o2bHMxUVhYb01EMHVQK1dPYlNsQnNiZkNpM0Z2SmNBdDNOVnc1eEtJMFJxN0R2Ck1LdEp0WUFreFNhK2NWZ3BNSEh5WFZmaVh0VnhVT0dzRzk2OUlRWWlNdzh3TkFwa2ZRbHNOSm9MWkdpemJBWkEKNEhrZjA0RUNnWUVBMUx3SHFGYXNDcElxOFFxVkdwOEkvMjNIcXF5bm1rQThTVS9BK2RVV3hxVjJ4L2dWQy9oZApCc3FiZHNLS3Y4RU5Sc3BRNm5sc0FpU05xS0hHeUZvbzF6UVhFVHE4WVhIaG5GVGZoMm5uVFZwRmJCNVdhTTRXCmRaa04vUzZsSGhDaDIxTnJUcEl0dnhjM0JDemc3NloxVHFaV01yc3JCZE9tbDZMUnNJUzZRTkVDZ1lFQS9wUmEKczI3MzFKZjYra0p0VXRYemVGbk9aeEZOR0hTemFFNmI0UnhuWERmN2w2R0tmMElwemtqVUhjUzA0bWpNditmbApJaDBsVGVYaE5hQm13ZGwwU3U1djUyWUFreFlvMmFoMVJWZk5QMEVqdkw4QWtUb2RsSEE1TGhjaVVhWjlBWkRLCmJXS0QwbGMzL0Q5bmVlSGpSZFpMSmhoVW5DNlFTbU9ad3Q4SFFka0NnWUVBZ0FRKzMzQjR5MHUyaDZNRW95WjgKOWFrTWRJcTl1VGRha0F0c1oydHg3MHgzTkZMMzUySW9LUVFYbGRud1FRQmRpdklJeTNFU0xCL3ZGMEZ6Sy9JRgpqYXVORGhNNGRiTmdQd0ZjR2xNQ25DdnNodW1pdWlMNnBQM2J5elljcXdEN1JjN25UanJ0U0ljaDFtTmpZUlBjCmw5M0ZGWFpJcDVMOE4xZ0ZzNkhMcTJFQ2dZRUEwVFJZMU50OERkaFhCeEZQaGFNTVVQcDhBM2JVUkNTaXlqVFAKSkU2VElkVmZpMXZVMUg4cW03cDlsWGp3cko0eXBBR002bHZKTEJxYzE5VFluTFIyUEoxMG1GUGFaUVR3ek8wQwpjZG1WY1VXMmVJVDlrbHFQdEV3RXNUdVJtRWVZc3BDcHlQb01HZTVTczVmbkVPSHdRcE8zYmJiUTBRZnl5eTdPClRMVzY0UUVDZ1lBNWZxbUhkYjU1Y0ZReDNyWlVqTkMyN3o5MTVBMzRjdkVLTlIvSjAxekFicUlHWFJ3dWRsQlcKYWQ5S1ZrSzhIenZHRVRlUTU1NmNXTU9yRGhyejZrSS9GWE9TL3poNnJmQ1JKV0xCL3ptSXlsdU8yZmR4VmQ2UAo5eStJY0tIN3dCcXFubkxlN3Nxb2FHU2Q5UTEzdTc4QWhnbGN1N3BocUlaWmVscHdMemRjYlE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=`
func main() {
    k8sOperator := cmii_operator.CmiiK8sOperator{}
    k8sOperator.BuildCurrentClientFromConfig(realConfig)
    realNamespace := "zyga"

    // get all pods
    //allInterface := k8sOperator.PodAllInterface(realNamespace)
    //
    //for _, deploymentInterface := range allInterface {
    //	utils.BeautifulPrint(deploymentInterface)
    //}

    // restart all backend
    cmii_operator.RestartCmiiBackendDeployment(realNamespace)

    //cmii_operator.RestartCmiiFrontendDeployment(realNamespace)

    //backMap := map[string]string{
    //	"cmii-admin-data": "5.4.0",
    //	"cmii-admin-gateway": "5.4.0",
    //	"cmii-admin-user": "5.4.0",
    //	"cmii-app-release": "4.2.0-validation",
    //	"cmii-open-gateway": "5.4.0",
    //	"cmii-suav-supervision": "5.2.0",
    //	"cmii-uav-airspace": "5.4.0",
    //	"cmii-uav-alarm": "5.4.0",
    //	"cmii-uav-autowaypoint": "4.1.6-cm",
    //	"cmii-uav-brain": "5.4.0",
    //	"cmii-uav-cloud-live": "5.4.0",
    //	"cmii-uav-clusters": "5.2.0",
    //	"cmii-uav-cms": "5.3.0",
    //	"cmii-uav-data-post-process": "5.4.0",
    //	"cmii-uav-depotautoreturn": "5.4.0",
    //	"cmii-uav-developer": "5.4.0",
    //	"cmii-uav-device": "5.4.0-25916",
    //	"cmii-uav-emergency": "5.3.0",
    //	"cmii-uav-gateway": "5.4.0",
    //	"cmii-uav-gis-server": "5.4.0",
    //	"cmii-uav-grid-datasource": "5.2.0-24810",
    //	"cmii-uav-grid-engine": "5.1.0",
    //	"cmii-uav-grid-manage": "5.1.0",
    //	"cmii-uav-industrial-portfolio": "5.4.0-27348-1",
    //	"cmii-uav-integration": "5.4.0-25916",
    //	"cmii-uav-kpi-monitor": "5.4.0",
    //	"cmii-uav-logger": "5.4.0",
    //	"cmii-uav-material-warehouse": "5.4.0",
    //	"cmii-uav-mission": "5.4.0-26462-0307",
    //	"cmii-uav-mqtthandler": "5.4.0-25916",
    //	"cmii-uav-multilink": "5.4.0",
    //	"cmii-uav-notice": "5.4.0",
    //	"cmii-uav-oauth": "5.4.0",
    //	"cmii-uav-process": "5.4.0",
    //	"cmii-uav-surveillance": "5.4.0-25916",
    //	"cmii-uav-threedsimulation": "5.1.0",
    //	"cmii-uav-tower": "5.4.0",
    //	"cmii-uav-user": "5.4.0",
    //	"cmii-uav-waypoint": "5.4.0-26768",
    //}
    //
    //frontMap := map[string]string{
    //	"cmii-suav-platform-supervision": "5.4.0",
    //	"cmii-suav-platform-supervisionh5": "5.4.0",
    //	"cmii-uav-platform": "5.4.0-25263",
    //	"cmii-uav-platform-ai-brain": "5.4.0",
    //	"cmii-uav-platform-armypeople": "5.4.0",
    //	"cmii-uav-platform-base": "5.4.0",
    //	"cmii-uav-platform-cms-portal": "5.4.0",
    //	"cmii-uav-platform-detection": "5.4.0",
    //	"cmii-uav-platform-emergency-rescue": "5.2.0",
    //	"cmii-uav-platform-hljtt": "5.3.0-hjltt",
    //	"cmii-uav-platform-jiangsuwenlv": "4.1.3-jiangsu-0427",
    //	"cmii-uav-platform-logistics": "5.4.0",
    //	"cmii-uav-platform-media": "5.4.0",
    //	"cmii-uav-platform-multiterminal": "5.4.0",
    //	"cmii-uav-platform-mws": "5.4.0",
    //	"cmii-uav-platform-oms": "5.4.0",
    //	"cmii-uav-platform-open": "5.4.0",
    //	"cmii-uav-platform-qingdao": "4.1.6-24238-qingdao",
    //	"cmii-uav-platform-qinghaitourism": "4.1.0-21377-0508",
    //	"cmii-uav-platform-security": "4.1.6",
    //	"cmii-uav-platform-securityh5": "5.4.0",
    //	"cmii-uav-platform-seniclive": "5.2.0",
    //	"cmii-uav-platform-share": "5.4.0",
    //	"cmii-uav-platform-splice": "5.4.0",
    //	"cmii-uav-platform-threedsimulation": "5.2.0-21392",
    //	"cmii-uav-platform-visualization": "5.2.0",
    //}
    //
    //cmii_operator.CmiiOperator = k8sOperator
    //
    //result := cmii_operator.UpdateCmiiImageTagFromNameTagMap(realNamespace, backMap)
    //utils.BeautifulPrint(result)
    //
    //result = cmii_operator.UpdateCmiiImageTagFromNameTagMap(realNamespace, frontMap)
    //utils.BeautifulPrint(result)

}
@@ -1,4 +1,4 @@
package cmii_operator
package image

var MiddlewareAmd64 = []string{
    "bitnami/redis:6.2.6-debian-10-r0",
@@ -11,6 +11,8 @@ var MiddlewareAmd64 = []string{
    "bitnami/rabbitmq:3.9.12-debian-10-r3",
    "bitnami/rabbitmq:3.11.26-debian-11-r2",
    "ossrs/srs:v4.0.136",
    "ossrs/srs:v5.0.195",
    "ossrs/srs:v4.0-r3",
    "emqx/emqx:4.2.12",
    "nacos/nacos-server:v2.1.2",
    "nacos/nacos-server:v2.1.2-slim",
@@ -20,8 +22,6 @@ var MiddlewareAmd64 = []string{
    "bitnami/minio:2023.5.4",
    "kubernetesui/dashboard:v2.0.1",
    "kubernetesui/metrics-scraper:v1.0.4",
    "ossrs/srs:v4.0-r3",
    "ossrs/srs:v5.0.195",
    "nginx:1.21.3",
    "redis:6.0.20-alpine",
    "dyrnq/nfs-subdir-external-provisioner:v4.0.2",
@@ -1,4 +1,4 @@
package cmii_operator
package image

import (
    "bufio"
@@ -15,9 +15,12 @@ import (
    "os"
    "strconv"
    "strings"
    "wdd.io/agent-go/executor"
    "wdd.io/agent-go/logger"
)

var apiClient = newClient()
var log = logger.Log

const CmiiHarborPrefix = "harbor.cdcyy.com.cn/cmii/"

@@ -58,7 +61,7 @@ func getContainerList(all bool) []types.Container {
    return containers
}

func ImageGetAll() []types.ImageSummary {
func GetAll() []types.ImageSummary {

    list, err := apiClient.ImageList(context.TODO(), types.ImageListOptions{
        All: true,
@@ -71,9 +74,9 @@ func ImageGetAll() []types.ImageSummary {
    return list
}

func ImageGetByName(imageName string) *types.ImageSummary {
func GetByName(imageName string) *types.ImageSummary {

    imageGetAll := ImageGetAll()
    imageGetAll := GetAll()

    for _, imageSummary := range imageGetAll {
        for _, tag := range imageSummary.RepoTags {
@@ -85,8 +88,8 @@ func ImageGetByName(imageName string) *types.ImageSummary {
    return nil
}

func ImageDelete(imageName string) []types.ImageDeleteResponseItem {
    imageGetByName := ImageGetByName(imageName)
func Delete(imageName string) []types.ImageDeleteResponseItem {
    imageGetByName := GetByName(imageName)
    if imageGetByName == nil {
        log.ErrorF("[ImageDelete] -- image not exists ! %s", imageGetByName.RepoTags)
        return nil
@@ -104,31 +107,39 @@ func ImageDelete(imageName string) []types.ImageDeleteResponseItem {
    return remove
}

func ImagePruneAllCmiiImages() (errorRemoveImageNameList []string) {
func PruneAllCmiiImages() (errorRemoveImageNameList []string) {

    imageGetAll := ImageGetAll()
    apiClient.ImagesPrune(context.TODO(), filters.Args{})

    imageGetAll := GetAll()

    // ip:8033
    //re := regexp.MustCompile(`\b(?:\d{1,3}\.){3}\d{1,3}:\d{1,4}`)

    for _, imageSummary := range imageGetAll {
        if strings.Contains(imageSummary.RepoTags[0], CmiiHarborPrefix) {
        for _, repoTag := range imageSummary.RepoTags {
            if strings.HasPrefix(repoTag, CmiiHarborPrefix) {
                for _, tag := range imageSummary.RepoTags {
                    _, err := apiClient.ImageRemove(context.TODO(), imageSummary.ID, types.ImageRemoveOptions{
                        Force:         true,
                        PruneChildren: false,
                        PruneChildren: true,
                    })
                    if err != nil {
                        log.ErrorF("[ImageDelete] -- ImageRemove error ! %s", err.Error())
                        errorRemoveImageNameList = append(errorRemoveImageNameList, imageSummary.RepoTags[0])
                        errorRemoveImageNameList = append(errorRemoveImageNameList, tag)
                    }
                    log.InfoF("[ImageDelete] - image remove of [%s] success!", tag)
                }
            }

            log.InfoF("[ImageDelete] - image remove of [%s] success!", imageSummary.RepoTags[0])
        }
    }

    return errorRemoveImageNameList
}

func ImageTagFromSourceToTarget(sourceImageName, targetImageName string) bool {
func TagFromSourceToTarget(sourceImageName, targetImageName string) bool {

    getByName := ImageGetByName(sourceImageName)
    getByName := GetByName(sourceImageName)
    if getByName == nil {
        log.ErrorF("[ImageTagFromSourceToTarget] - %s not exits !", sourceImageName)
        return false
@@ -143,9 +154,9 @@ func ImageTagFromSourceToTarget(sourceImageName, targetImageName string) bool {
    return true
}

func ImagePushToOctopusKindHarbor(targetImageName string) (pushResult io.ReadCloser) {
func PushToOctopusKindHarbor(targetImageName string) (pushResult io.ReadCloser) {

    if ImageGetByName(targetImageName) == nil {
    if GetByName(targetImageName) == nil {
        log.ErrorF("[ImagePushToOctopusKindHarbor] - %s not exits !", targetImageName)
        return pushResult
    }
@@ -164,7 +175,7 @@ func ImagePushToOctopusKindHarbor(targetImageName string) (pushResult io.ReadClo
    return pushResult
}

func ImageTagFromListAndPushToCHarbor(referenceImageList []string, targetHarborHost string) (errorPushImageNameList []string) {
func TagFromListAndPushToCHarbor(referenceImageList []string, targetHarborHost string) (errorPushImageNameList []string) {

    for _, imageName := range referenceImageList {
        // check image
@@ -184,8 +195,8 @@ func ImageTagFromListAndPushToCHarbor(referenceImageList []string, targetHarborH

        targetImageName := targetHarborHost + ":8033/" + targetProject + "/" + imageName

        if ImageTagFromSourceToTarget(cmiiImageFullName, targetImageName) {
            pushResult := ImagePushToOctopusKindHarbor(targetImageName)
        if TagFromSourceToTarget(cmiiImageFullName, targetImageName) {
            pushResult := PushToOctopusKindHarbor(targetImageName)
            if pushResult == nil {
                errorPushImageNameList = append(errorPushImageNameList, cmiiImageFullName)
                continue
@@ -204,7 +215,7 @@ func ImageTagFromListAndPushToCHarbor(referenceImageList []string, targetHarborH
    return errorPushImageNameList
}

func ImagePullFromCmiiHarbor(imageName string) (pullResult io.ReadCloser) {
func PullFromCmiiHarbor(imageName string) (pullResult io.ReadCloser) {
    pullResult, err := apiClient.ImagePull(context.TODO(), imageName, types.ImagePullOptions{
        All: false,
        RegistryAuth: "eyAidXNlcm5hbWUiOiAicmFkMDJfZHJvbmUiLCAicGFzc3dvcmQiOiAiRHJvbmVAMTIzNCIsICJlbWFpbCI6ICJpY2VAcXEuY29tIiB9Cg==",
@@ -223,7 +234,7 @@ func ImagePullFromCmiiHarbor(imageName string) (pullResult io.ReadCloser) {

func ImagePullFromCmiiHarborByMap(imageVersionMap map[string]string, silentMode bool) (errorPullImageList []string) {

    fullImageNameList := convertCMiiImageMapToList(imageVersionMap)
    fullImageNameList := ConvertCMiiImageMapToList(imageVersionMap)
    return ImagePullFromFullNameList(fullImageNameList)

}
@@ -244,7 +255,7 @@ func ImagePullCMiiFromFileJson(filePathName string) {
    }

    for image, tag := range resultMap {
        pullResult := ImagePullFromCmiiHarbor(image + ":" + tag)
        pullResult := PullFromCmiiHarbor(image + ":" + tag)
        if pullResult == nil {
            continue
        }
@@ -267,7 +278,7 @@ func ImagePullFromFullNameList(fullImageNameList []string) (errorPullImageList [

    for _, dep := range fullImageNameList {

        pullResult := ImagePullFromCmiiHarbor(dep)
        pullResult := PullFromCmiiHarbor(dep)
        if pullResult == nil {
            errorPullImageList = append(errorPullImageList, dep)
            continue
@@ -288,7 +299,7 @@ func ImagePullFromFullNameList(fullImageNameList []string) (errorPullImageList [
    return errorPullImageList
}

func ImagePullFromListAndCompressSplit(fullImageNameList []string, gzipFolder string) (errorPullImageList, errorGzipImageList []string) {
func PullFromListAndCompressSplit(fullImageNameList []string, gzipFolder string) (errorPullImageList, errorGzipImageList []string) {

    errorPullImageList = ImagePullFromFullNameList(fullImageNameList)

@@ -301,7 +312,7 @@ func ImagePullFromListAndCompressSplit(fullImageNameList []string, gzipFolder st
    }

    for _, image := range fullImageNameList {
        if !ImageSaveToTarGZ(image, gzipFolder) {
        if !SaveToTarGZ(image, gzipFolder) {
            errorGzipImageList = append(errorGzipImageList, image)
        }
    }
@@ -356,9 +367,9 @@ func ImageLoadFromFolderPath(folderPath string) (errorLoadImageNameList []string
    return errorLoadImageNameList
}

func ImageSaveToTarGZ(targetImageName, folderPathPrefix string) bool {
func SaveToTarGZ(targetImageName, folderPathPrefix string) bool {

    imageGetByName := ImageGetByName(targetImageName)
    imageGetByName := GetByName(targetImageName)
    if imageGetByName == nil {
        log.ErrorF("[ImageSaveToTarGZ] - %s not exits", targetImageName)
        return false
@@ -370,12 +381,20 @@ func ImageSaveToTarGZ(targetImageName, folderPathPrefix string) bool {
        return false
    }

    gzipImageFile := convertImageGzipFileName(imageGetByName.RepoTags[0])
    var realImageTag string
    for _, repoTag := range imageGetByName.RepoTags {
        if !strings.Contains(repoTag, "8033") {
            realImageTag = repoTag
        }
    }

    gzipImageFile := convertImageGzipFileName(realImageTag)
    if !strings.HasSuffix(folderPathPrefix, "/") {
        folderPathPrefix += "/"
    }
    _ = os.MkdirAll(folderPathPrefix, os.ModeDir)
    gzipImageFile = folderPathPrefix + gzipImageFile
    log.InfoF("[ImageSaveToTarGZ] - start to save [%s] to [%s]", imageGetByName.RepoTags[0], gzipImageFile)
    log.InfoF("[ImageSaveToTarGZ] - start to save [%s] to [%s]", realImageTag, gzipImageFile)

    _ = os.Remove(gzipImageFile)
    tarFile, err := os.Create(gzipImageFile)
@@ -401,17 +420,20 @@ func ImageSaveToTarGZ(targetImageName, folderPathPrefix string) bool {
    return true
}

// convertImageGzipFileName must always produce output with exactly the same structure
func convertImageGzipFileName(imageRepoTag string) (gzipFileName string) {

    split := strings.Split(imageRepoTag, ":")
    //log.DebugF(" %s to %s", imageRepoTag, split)
    if len(split) == 1 {
        return "docker=" + imageRepoTag + "=latest.tar.gz"
        // nginx
        return "docker=library=" + imageRepoTag + "=latest.tar.gz"
    }

    first := strings.Split(split[0], "/")
    //log.DebugF(" split[0] %s to %s", split[0], first)
    if len(first) == 3 {

        if strings.Contains(first[0], "cdcyy") {
            gzipFileName += "cmlc="
        } else {
@@ -423,16 +445,28 @@ func convertImageGzipFileName(imageRepoTag string) (gzipFileName string) {
        gzipFileName += first[2]
        gzipFileName += "="

    } else if len(first) == 4 {
        if strings.Contains(first[0], "cdcyy") {
            gzipFileName += "cmlc="
        } else {
            gzipFileName += "docker="
        }

        gzipFileName += first[1]
        gzipFileName += "="
        gzipFileName += first[2]
        gzipFileName += "="
    } else if len(first) == 2 {
        //
        // bitnami/redis
        // ossrs/srs
        gzipFileName += "docker="
        gzipFileName += first[0]
        gzipFileName += "="
        gzipFileName += first[1]
        gzipFileName += "="
    } else if len(first) == 1 {
        //
        return "docker=" + split[0] + "=" + split[1] + ".tar.gz"
        // nginx
        return "docker=library=" + split[0] + "=" + split[1] + ".tar.gz"
    }

    gzipFileName += split[1]
@@ -441,7 +475,7 @@ func convertImageGzipFileName(imageRepoTag string) (gzipFileName string) {
    return gzipFileName
}

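As a sanity check of the naming scheme implied by the branches above (illustrative only, not part of the commit — the len(first)==3 branch is only partially visible in this hunk), a few inputs and the .tar.gz names they should map to:

// convertImageGzipFileName("nginx:1.21.3")                                          -> "docker=library=nginx=1.21.3.tar.gz"
// convertImageGzipFileName("bitnami/redis:6.2.6-debian-10-r0")                      -> "docker=bitnami=redis=6.2.6-debian-10-r0.tar.gz"
// convertImageGzipFileName("harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:4.1.6-beta")  -> "cmlc=cmii=cmii-uav-gateway=4.1.6-beta.tar.gz"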
func convertCMiiImageMapToList(cmiiImageVersionMap map[string]string) (fullImageNameList []string) {
func ConvertCMiiImageMapToList(cmiiImageVersionMap map[string]string) (fullImageNameList []string) {

    for image, tag := range cmiiImageVersionMap {
        s := CmiiHarborPrefix + image + ":" + tag
@@ -469,3 +503,19 @@ func loginToDockerHub(HarborFullHost string) {

    log.DebugF("[loginToDockerHub] - login is %s", login.Status)
}

func WriteDependencyImageToFile() {
    imageFilePrefix := "C:\\Users\\wddsh\\Documents\\IdeaProjects\\ProjectOctopus\\cmii_operator\\image\\"

    middleFile := imageFilePrefix + "middle-image.txt"
    _ = os.Remove(middleFile)
    for _, image := range MiddlewareAmd64 {
        executor.BasicAppendContentToFile(image+"\n", middleFile)
    }

    rkeFile := imageFilePrefix + "rke-image.txt"
    _ = os.Remove(rkeFile)
    for _, image := range Rancher1204Amd64 {
        executor.BasicAppendContentToFile(image+"\n", rkeFile)
    }
}
@@ -1,10 +1,12 @@
package cmii_operator
package image

import (
    "bufio"
    "fmt"
    "strconv"
    "strings"
    "testing"
    "time"
    "wdd.io/agent-go/assert"
    "wdd.io/agent-go/utils"
)
@@ -28,7 +30,7 @@ func TestGetAllContainer(t *testing.T) {
}

func TestImageGetAll(t *testing.T) {
    imageGetAll := ImageGetAll()
    imageGetAll := GetAll()

    for _, summary := range imageGetAll {
        utils.BeautifulPrint(summary)
@@ -37,14 +39,14 @@ func TestImageGetAll(t *testing.T) {

func TestImageGetByName(t *testing.T) {
    image := "harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:4.1.6-beta"
    imageGetByName := ImageGetByName(image)
    imageGetByName := GetByName(image)

    utils.BeautifulPrint(imageGetByName)
}

func TestImageDelete(t *testing.T) {
    image := "harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:4.1.6-beta"
    imageDelete := ImageDelete(image)
    imageDelete := Delete(image)

    for _, item := range imageDelete {
        utils.BeautifulPrint(item)
@@ -54,7 +56,7 @@ func TestImageDelete(t *testing.T) {
func TestImagePullFromCmiiHarbor(t *testing.T) {
    image := "harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:4.1.6-beta"

    pullFromCmiiHarbor := ImagePullFromCmiiHarbor(image)
    pullFromCmiiHarbor := PullFromCmiiHarbor(image)
    defer pullFromCmiiHarbor.Close()

    scanner := bufio.NewScanner(pullFromCmiiHarbor)
@@ -82,11 +84,11 @@ func TestImagePushToOctopusKindHarbor(t *testing.T) {
    // re-tag
    image := "harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:4.1.6-beta"
    newTag := "10.250.0.100:8033/cmii/cmii-uav-gateway:4.1.6-beta"
    target := ImageTagFromSourceToTarget(image, newTag)
    target := TagFromSourceToTarget(image, newTag)
    assert.Equal(t, target, true, "image re-tag error !")

    // push
    pushResult := ImagePushToOctopusKindHarbor(newTag)
    pushResult := PushToOctopusKindHarbor(newTag)
    defer pushResult.Close()

    scanner := bufio.NewScanner(pushResult)
@@ -106,7 +108,7 @@ func TestImageLoadFromFile(t *testing.T) {
func TestImageSaveToTarGZ(t *testing.T) {
    image := "harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:4.1.6-beta"

    imageSaveToTarGZ := ImageSaveToTarGZ(image, "/home/wdd/IdeaProjects/ProjectOctopus/cmii_operator/log")
    imageSaveToTarGZ := SaveToTarGZ(image, "/home/wdd/IdeaProjects/ProjectOctopus/cmii_operator/log")

    assert.Equal(t, imageSaveToTarGZ, true, "image save to tar gz file error !")
}
@@ -115,26 +117,18 @@ func TestConvertImageGzipFileName(t *testing.T) {

    test := []string{
        "bitnami/redis:6.2.6-debian-10-r0",
        "bitnami/redis:6.2.14-debian-11-r1",
        "bitnami/mysql:8.0.35-debian-11-r1",
        "bitnami/mysql:8.1.0-debian-11-r42",
        "simonrupf/chronyd:0.4.3",
        "bitnami/bitnami-shell:10-debian-10-r140",
        "bitnami/bitnami-shell:11-debian-11-r136",
        "harbor.cdcyy.com.cn/cmii/cmii-rtsp-operator:v4.1.0",
        "harbor.cdcyy.com.cn/cmii/ossrs/srs:v4.0.136",
        "bitnami/rabbitmq:3.9.12-debian-10-r3",
        "bitnami/rabbitmq:3.11.26-debian-11-r2",
        "ossrs/srs:v4.0.136",
        "emqx/emqx:4.2.12",
        "nacos/nacos-server:v2.1.2",
        "nacos/nacos-server:v2.1.2-slim",
        "mongo:5.0",
        "rabbitmq:3.9-management",
        "bitnami/minio:2022.5.4",
        "bitnami/minio:2023.5.4",
        "kubernetesui/dashboard:v2.0.1",
        "kubernetesui/metrics-scraper:v1.0.4",
        "ossrs/srs:v4.0-r3",
        "nginx:1.21.3",
        "redis:6.0.20-alpine",
        "dyrnq/nfs-subdir-external-provisioner:v4.0.2",
        "busybox:latest",
@@ -143,13 +137,13 @@ func TestConvertImageGzipFileName(t *testing.T) {

    for _, s := range test {
        gzipFileName := convertImageGzipFileName(s)
        t.Logf(" %s to %s", s, gzipFileName)
        fmt.Printf(" %s to %s \n", s, gzipFileName)
    }
}

func TestImagePruneAllCmiiImages(t *testing.T) {

    errorRemoveImageNameList := ImagePruneAllCmiiImages()
    errorRemoveImageNameList := PruneAllCmiiImages()

    utils.BeautifulPrintListWithTitle(errorRemoveImageNameList, "CMII Image Prune Error")
}
@@ -159,8 +153,8 @@ func TestImageTagFromSourceToTarget(t *testing.T) {
    sourceImageName := "ossrs/srs:v5.0.195"
    targetImageName := "harbor.wdd.io:8033/cmii/srs:v5.0.195"

    if ImageTagFromSourceToTarget(sourceImageName, targetImageName) {
        pushResult := ImagePushToOctopusKindHarbor(targetImageName)
    if TagFromSourceToTarget(sourceImageName, targetImageName) {
        pushResult := PushToOctopusKindHarbor(targetImageName)
        defer pushResult.Close()

        scanner := bufio.NewScanner(pushResult)
@@ -170,3 +164,29 @@ func TestImageTagFromSourceToTarget(t *testing.T) {
    }

}

func fibonacci(c, quit chan int64) {
    x, y := int64(0), int64(1)
    for {
        select {
        case c <- x:
            x, y = y, x+y
            fmt.Println("count is " + strconv.FormatInt(int64(<-c), 10))
        case <-quit:
            fmt.Println("quit current x is " + strconv.FormatInt(int64(x), 10))
            return
        }
    }
}
func TestWriteDependencyImageToFile(t *testing.T) {
    //WriteDependencyImageToFile()

    c := make(chan int64, 1)
    quit := make(chan int64, 1)
    go fibonacci(c, quit)

    after := time.After(time.Second)

    <-after
    quit <- 1
}
184 cmii_operator/image/image_sync.sh Normal file
@@ -0,0 +1,184 @@
#!/bin/bash

cmii_image_list=(
cmlc=cmii=cmii-admin-data=5.4.0.tar.gz
cmlc=cmii=cmii-admin-gateway=5.4.0.tar.gz
docker=ossrs=srs=v4.0.136.tar.gz
)
middle_image_list=(
cmlc=cmii=nfs-subdir-external-provisioner=v4.0.2.tar.gz
docker=busybox=latest.tar.gz
cmlc=cmii=srs=v4.0-r3.tar.gz
docker=emqx=emqx=4.2.12.tar.gz
docker=bitnami=bitnami-shell=10-debian-10-r140.tar.gz
docker=kubernetesui=dashboard=v2.0.1.tar.gz
docker=bitnami=bitnami-shell=11-debian-11-r136.tar.gz
docker=kubernetesui=metrics-scraper=v1.0.4.tar.gz
docker=bitnami=minio=2022.5.4.tar.gz
docker=mongo=5.0.tar.gz
docker=bitnami=minio=2023.5.4.tar.gz
docker=nacos=nacos-server=v2.1.2-slim.tar.gz
docker=bitnami=mysql=8.0.35-debian-11-r1.tar.gz
docker=nginx=1.21.3.tar.gz
docker=bitnami=mysql=8.1.0-debian-11-r42.tar.gz
docker=ossrs=srs=v4.0.136.tar.gz
docker=bitnami=rabbitmq=3.11.26-debian-11-r2.tar.gz
docker=ossrs=srs=v5.0.195.tar.gz
docker=bitnami=rabbitmq=3.9.12-debian-10-r3.tar.gz
docker=rabbitmq=3.9-management.tar.gz
docker=bitnami=redis=6.2.14-debian-11-r1.tar.gz
docker=redis=6.0.20-alpine.tar.gz
docker=bitnami=redis=6.2.6-debian-10-r0.tar.gz
docker=simonrupf=chronyd=0.4.3.tar.gz
)
rke_image_list=(
docker=rancher=backup-restore-operator=v1.0.3.tar.gz
docker=rancher=calico-cni=v3.17.2.tar.gz
docker=rancher=calico-ctl=v3.17.2.tar.gz
docker=rancher=calico-kube-controllers=v3.17.2.tar.gz
docker=rancher=calico-node=v3.17.2.tar.gz
docker=rancher=calico-pod2daemon-flexvol=v3.17.2.tar.gz
docker=rancher=cis-operator=v1.0.3.tar.gz
docker=rancher=cluster-proportional-autoscaler=1.7.1.tar.gz
docker=rancher=cluster-proportional-autoscaler=1.8.1.tar.gz
docker=rancher=configmap-reload=v0.3.0-rancher4.tar.gz
docker=rancher=coredns-coredns=1.8.0.tar.gz
docker=rancher=coreos-etcd=v3.4.14-rancher1.tar.gz
docker=rancher=coreos-flannel=v0.13.0-rancher1.tar.gz
docker=rancher=coreos-kube-state-metrics=v1.9.7.tar.gz
docker=rancher=coreos-prometheus-config-reloader=v0.39.0.tar.gz
docker=rancher=coreos-prometheus-operator=v0.39.0.tar.gz
docker=rancher=externalip-webhook=v0.1.6.tar.gz
docker=rancher=flannel-cni=v0.3.0-rancher6.tar.gz
docker=rancher=fleet-agent=v0.3.4.tar.gz
docker=rancher=fleet=v0.3.4.tar.gz
docker=rancher=fluentd=v0.1.24.tar.gz
docker=rancher=grafana-grafana=7.1.5.tar.gz
docker=rancher=hyperkube=v1.20.4-rancher1.tar.gz
docker=rancher=istio-kubectl=1.5.10.tar.gz
docker=rancher=jimmidyson-configmap-reload=v0.3.0.tar.gz
docker=rancher=k8s-dns-dnsmasq-nanny=1.15.2.tar.gz
docker=rancher=k8s-dns-kube-dns=1.15.2.tar.gz
docker=rancher=k8s-dns-node-cache=1.15.13.tar.gz
docker=rancher=k8s-dns-sidecar=1.15.2.tar.gz
docker=rancher=klipper-lb=v0.1.2.tar.gz
docker=rancher=kube-api-auth=v0.1.4.tar.gz
docker=rancher=kubernetes-external-dns=v0.7.3.tar.gz
docker=rancher=library-busybox=1.31.1.tar.gz
docker=rancher=library-busybox=1.32.1.tar.gz
docker=rancher=library-nginx=1.19.2-alpine.tar.gz
docker=rancher=library-traefik=1.7.19.tar.gz
docker=rancher=local-path-provisioner=v0.0.11.tar.gz
docker=rancher=local-path-provisioner=v0.0.14.tar.gz
docker=rancher=local-path-provisioner=v0.0.19.tar.gz
docker=rancher=log-aggregator=v0.1.7.tar.gz
docker=rancher=metrics-server=v0.4.1.tar.gz
docker=rancher=nginx-ingress-controller-defaultbackend=1.5-rancher1.tar.gz
docker=rancher=nginx-ingress-controller=nginx-0.43.0-rancher1.tar.gz
docker=rancher=opa-gatekeeper=v3.1.0-beta.7.tar.gz
docker=rancher=openzipkin-zipkin=2.14.2.tar.gz
docker=rancher=pause=3.2.tar.gz
docker=rancher=plugins-docker=18.09.tar.gz
docker=rancher=prom-alertmanager=v0.21.0.tar.gz
docker=rancher=prometheus-auth=v0.2.1.tar.gz
docker=rancher=prom-node-exporter=v1.0.1.tar.gz
docker=rancher=prom-prometheus=v2.18.2.tar.gz
docker=rancher=rancher-agent=v2.5.7.tar.gz
docker=rancher=rancher=v2.5.7.tar.gz
docker=rancher=rancher-webhook=v0.1.0-beta9.tar.gz
docker=rancher=rke-tools=v0.1.72.tar.gz
docker=rancher=security-scan=v0.1.14.tar.gz
docker=rancher=security-scan=v0.2.2.tar.gz
docker=rancher=shell=v0.1.6.tar.gz
docker=rancher=sonobuoy-sonobuoy=v0.16.3.tar.gz
docker=rancher=system-upgrade-controller=v0.6.2.tar.gz
)
oss_prefix=https://oss.demo.uavcmlc.com/cmlc-installation/shls
oss_middle_prefix=https://oss.demo.uavcmlc.com/cmlc-installation/mid-image-amd64
oss_rke_prefix=https://oss.demo.uavcmlc.com/cmlc-installation/rke-image-amd64
target_harbor_host=103.0.180.181:8033

cmii_image_download_from_oss() {
    for image in "${cmii_image_list[@]}"; do
        echo "start to download => $image"
        curl -x socks5h://103.0.180.82:9997 $oss_prefix/$image -o $image
        echo ""
    done
}

middle_image_download_from_oss() {
|
||||
mkdir -p /wdd/image/middle/
|
||||
for image in "${middle_image_list[@]}"; do
|
||||
echo "start to download => $image"
|
||||
curl -x socks5h://103.0.180.82:9997 $oss_middle_prefix/$image -o /wdd/image/middle/$image
|
||||
echo ""
|
||||
done
|
||||
}
|
||||
|
||||
rke_image_download_from_oss() {
|
||||
mkdir -p /wdd/image/rke/
|
||||
for image in "${rke_image_list[@]}"; do
|
||||
echo "start to download => $image"
|
||||
curl -x socks5h://103.0.180.82:9997 $oss_rke_prefix/$image -o /wdd/image/rke/$image
|
||||
echo ""
|
||||
done
|
||||
}
|
||||
|
||||
image_load_to_harbor() {
|
||||
local cmii_harbor_prefix="harbor.cdcyy.com.cn/cmii/"
|
||||
for image in "${cmii_image_list[@]}"; do
|
||||
echo "start to load => $image"
|
||||
docker load <"$image"
|
||||
echo ""
|
||||
if [[ $image == cmlc* ]]; then
|
||||
local app_name=$(echo $image | cut -d "=" -f3)
|
||||
local ccc=$(echo $image | cut -d "=" -f4)
|
||||
local app_tag="${ccc%.tar.gz}"
|
||||
echo "from $cmii_harbor_prefix$app_name:$app_tag ==> $target_harbor_host/cmii/$app_name:$app_tag"
|
||||
docker tag "$cmii_harbor_prefix$app_name:$app_tag" "$target_harbor_host/cmii/$app_name:$app_tag"
|
||||
|
||||
echo "start to push => $target_harbor_host/cmii/$app_name:$app_tag"
|
||||
docker login -u admin -p V2ryStr@ngPss $target_harbor_host
|
||||
docker push "$target_harbor_host/cmii/$app_name:$app_tag"
|
||||
echo ""
|
||||
fi
|
||||
echo ""
|
||||
done
|
||||
|
||||
for image in "${rke_image_list[@]}"; do
|
||||
echo "start to load => $image"
|
||||
docker load <"$image"
|
||||
echo ""
|
||||
local app_name_prefix=$(echo $image | cut -d "=" -f2)
|
||||
local app_name=$(echo $image | cut -d "=" -f3)
|
||||
local ccc=$(echo $image | cut -d "=" -f4)
|
||||
local app_tag="${ccc%.tar.gz}"
|
||||
echo "from $app_name_prefix/$app_name:$app_tag ==> $target_harbor_host/rancher/$app_name:$app_tag"
|
||||
docker tag "$app_name_prefix/$app_name:$app_tag" "$target_harbor_host/rancher/$app_name:$app_tag"
|
||||
echo "start to push => $target_harbor_host/rancher/$app_name:$app_tag"
|
||||
docker login -u admin -p V2ryStr@ngPss $target_harbor_host
|
||||
docker push "$target_harbor_host/rancher/$app_name:$app_tag"
|
||||
echo
|
||||
done
|
||||
|
||||
# for image in "${middle_image_list[@]}"; do
|
||||
# echo "start to load => $image"
|
||||
# docker load <"$image"
|
||||
# echo ""
|
||||
# local app_name_prefix=$(echo $image | cut -d "=" -f2)
|
||||
# local app_name=$(echo $image | cut -d "=" -f3)
|
||||
# local ccc=$(echo $image | cut -d "=" -f4)
|
||||
# local app_tag="${ccc%.tar.gz}"
|
||||
# echo "from $app_name_prefix/$app_name:$app_tag ==> $target_harbor_host/rancher/$app_name:$app_tag"
|
||||
# echo "start to push => $target_harbor_host/rancher/$app_name:$app_tag"
|
||||
# docker login -u admin -p V2ryStr@ngPss $target_harbor_host
|
||||
# docker push "$target_harbor_host/rancher/$app_name:$app_tag"
|
||||
# done
|
||||
|
||||
}
|
||||
|
||||
create_harbor_project() {
|
||||
curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"cmii","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$target_harbor_host/api/v2.0/projects
|
||||
|
||||
curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"rancher","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$target_harbor_host/api/v2.0/projects
|
||||
}
|
||||
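The script above encodes each archive name as source=org=image=tag.tar.gz and recovers the pieces with cut -d "=". Below is a hedged Go sketch of the same parsing rule (matching the rke branch, which retags to org/image:tag); parseArchiveName is a hypothetical helper for illustration, not a function that exists in agent-go.

package main

import (
    "fmt"
    "strings"
)

// parseArchiveName turns "docker=rancher=calico-cni=v3.17.2.tar.gz"
// into ("rancher/calico-cni", "v3.17.2"), mirroring the cut -d "=" logic.
func parseArchiveName(archive string) (image string, tag string, err error) {
    parts := strings.Split(archive, "=")
    if len(parts) != 4 {
        return "", "", fmt.Errorf("unexpected archive name: %s", archive)
    }
    tag = strings.TrimSuffix(parts[3], ".tar.gz")
    image = parts[1] + "/" + parts[2]
    return image, tag, nil
}

func main() {
    img, tag, err := parseArchiveName("docker=rancher=calico-cni=v3.17.2.tar.gz")
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(img + ":" + tag) // rancher/calico-cni:v3.17.2
}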
15
cmii_operator/image/image_upload_oss.sh
Normal file
@@ -0,0 +1,15 @@
#!/bin/bash

#for image in $(ls /root/octopus_image/rke)
#do
#    echo "start to upload => $image"
#    mc cp /root/octopus_image/rke/$image demo/cmlc-installation/rke-image-amd64/
#    echo ""
#done

export local_path=/root/octopus_image/5.4.0
for image in $(ls $local_path); do
    echo "start to upload => $image"
    mc cp $local_path/$image demo/cmlc-installation/5.4.0/
    echo ""
done
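For completeness, a rough Go equivalent of the same upload loop: it lists the local directory and shells out to mc for each archive. The path and bucket alias are taken from the script above; the wrapper program itself is only a sketch and does not exist in the repository.

package main

import (
    "fmt"
    "os"
    "os/exec"
    "path/filepath"
)

func main() {
    localPath := "/root/octopus_image/5.4.0"
    entries, err := os.ReadDir(localPath)
    if err != nil {
        fmt.Println("read dir error:", err)
        return
    }
    for _, e := range entries {
        src := filepath.Join(localPath, e.Name())
        fmt.Println("start to upload =>", e.Name())
        // mc cp <file> demo/cmlc-installation/5.4.0/
        cmd := exec.Command("mc", "cp", src, "demo/cmlc-installation/5.4.0/")
        cmd.Stdout = os.Stdout
        cmd.Stderr = os.Stderr
        if err := cmd.Run(); err != nil {
            fmt.Println("upload failed:", err)
        }
    }
}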
25
cmii_operator/image/middle-image.txt
Normal file
@@ -0,0 +1,25 @@
bitnami/redis:6.2.6-debian-10-r0
bitnami/redis:6.2.14-debian-11-r1
bitnami/mysql:8.0.35-debian-11-r1
bitnami/mysql:8.1.0-debian-11-r42
simonrupf/chronyd:0.4.3
bitnami/bitnami-shell:10-debian-10-r140
bitnami/bitnami-shell:11-debian-11-r136
bitnami/rabbitmq:3.9.12-debian-10-r3
bitnami/rabbitmq:3.11.26-debian-11-r2
ossrs/srs:v4.0.136
ossrs/srs:v5.0.195
emqx/emqx:4.2.12
nacos/nacos-server:v2.1.2
nacos/nacos-server:v2.1.2-slim
mongo:5.0
rabbitmq:3.9-management
bitnami/minio:2022.5.4
bitnami/minio:2023.5.4
kubernetesui/dashboard:v2.0.1
kubernetesui/metrics-scraper:v1.0.4
ossrs/srs:v4.0-r3
nginx:1.21.3
redis:6.0.20-alpine
dyrnq/nfs-subdir-external-provisioner:v4.0.2
busybox:latest
62
cmii_operator/image/rke-image.txt
Normal file
@@ -0,0 +1,62 @@
rancher/backup-restore-operator:v1.0.3
rancher/calico-cni:v3.17.2
rancher/calico-ctl:v3.17.2
rancher/calico-kube-controllers:v3.17.2
rancher/calico-node:v3.17.2
rancher/calico-pod2daemon-flexvol:v3.17.2
rancher/cis-operator:v1.0.3
rancher/cluster-proportional-autoscaler:1.7.1
rancher/coredns-coredns:1.8.0
rancher/coreos-etcd:v3.4.14-rancher1
rancher/coreos-kube-state-metrics:v1.9.7
rancher/coreos-prometheus-config-reloader:v0.39.0
rancher/coreos-prometheus-operator:v0.39.0
rancher/externalip-webhook:v0.1.6
rancher/flannel-cni:v0.3.0-rancher6
rancher/coreos-flannel:v0.13.0-rancher1
rancher/fleet-agent:v0.3.4
rancher/fleet:v0.3.4
rancher/fluentd:v0.1.24
rancher/grafana-grafana:7.1.5
rancher/hyperkube:v1.20.4-rancher1
rancher/jimmidyson-configmap-reload:v0.3.0
rancher/k8s-dns-dnsmasq-nanny:1.15.2
rancher/k8s-dns-kube-dns:1.15.2
rancher/k8s-dns-node-cache:1.15.13
rancher/k8s-dns-sidecar:1.15.2
rancher/klipper-lb:v0.1.2
rancher/kube-api-auth:v0.1.4
rancher/kubectl:v1.20.4
rancher/kubernetes-external-dns:v0.7.3
rancher/cluster-proportional-autoscaler:1.8.1
rancher/library-busybox:1.31.1
rancher/library-busybox:1.32.1
rancher/library-nginx:1.19.2-alpine
rancher/library-traefik:1.7.19
rancher/local-path-provisioner:v0.0.11
rancher/local-path-provisioner:v0.0.14
rancher/local-path-provisioner:v0.0.19
rancher/log-aggregator:v0.1.7
rancher/istio-kubectl:1.5.10
rancher/metrics-server:v0.4.1
rancher/configmap-reload:v0.3.0-rancher4
rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
rancher/nginx-ingress-controller:nginx-0.43.0-rancher1
rancher/opa-gatekeeper:v3.1.0-beta.7
rancher/openzipkin-zipkin:2.14.2
rancher/pause:3.2
rancher/plugins-docker:18.09
rancher/prom-alertmanager:v0.21.0
rancher/prom-node-exporter:v1.0.1
rancher/prom-prometheus:v2.18.2
rancher/prometheus-auth:v0.2.1
rancher/rancher-agent:v2.5.7
rancher/rancher-webhook:v0.1.0-beta9
rancher/rancher:v2.5.7
rancher/rke-tools:v0.1.72
jerrychina2020/rke-tools:v0.175-linux
rancher/security-scan:v0.1.14
rancher/security-scan:v0.2.2
rancher/shell:v0.1.6
rancher/sonobuoy-sonobuoy:v0.16.3
rancher/system-upgrade-controller:v0.6.2
@@ -28,3 +28,84 @@
2024-02-23-10-55-14 uavcloud-demo cmii-uav-device 5.4.0-26906 5.4.0-26906-01
2024-02-23-14-32-05 uavcloud-devflight cmii-uav-device 5.2.0-validation 5.4.0-26906-01
2024-02-28-17-09-55 uavcloud-demo cmii-uav-device 5.4.0 5.4.0-26905
2024-03-04-17-33-02 uavcloud-demo cmii-uav-platform 5.4.0-25263 5.4.0-hotfix
2024-03-08-12-02-18 zyga cmii-uav-oauth 8033/cmii/cmii-uav-oauth5.4.0
2024-03-08-12-02-19 zyga cmii-uav-user 8033/cmii/cmii-uav-user5.4.0
2024-03-08-12-02-21 zyga cmii-uav-cms 8033/cmii/cmii-uav-cms5.3.0
2024-03-08-12-02-22 zyga cmii-uav-industrial-portfolio 8033/cmii/cmii-uav-industrial-portfolio5.4.0-27348-1
2024-03-08-12-02-23 zyga cmii-uav-surveillance 8033/cmii/cmii-uav-surveillance5.4.0-25916
2024-03-08-12-02-24 zyga cmii-uav-mission 8033/cmii/cmii-uav-mission5.4.0-26462-0307
2024-03-08-12-02-26 zyga cmii-admin-gateway 8033/cmii/cmii-admin-gateway5.4.0
2024-03-08-12-02-27 zyga cmii-uav-alarm 8033/cmii/cmii-uav-alarm5.4.0
2024-03-08-12-02-28 zyga cmii-uav-emergency 8033/cmii/cmii-uav-emergency5.3.0
2024-03-08-12-02-30 zyga cmii-uav-material-warehouse 8033/cmii/cmii-uav-material-warehouse5.4.0
2024-03-08-12-02-31 zyga cmii-uav-airspace 8033/cmii/cmii-uav-airspace5.4.0
2024-03-08-12-02-33 zyga cmii-uav-brain 8033/cmii/cmii-uav-brain5.4.0
2024-03-08-12-02-34 zyga cmii-uav-process 8033/cmii/cmii-uav-process5.4.0
2024-03-08-12-02-36 zyga cmii-uav-notice 8033/cmii/cmii-uav-notice5.4.0
2024-03-08-12-02-37 zyga cmii-uav-waypoint 8033/cmii/cmii-uav-waypoint5.4.0-26768
2024-03-08-12-02-38 zyga cmii-uav-autowaypoint 8033/cmii/cmii-uav-autowaypoint4.1.6-cm
2024-03-08-12-02-40 zyga cmii-uav-data-post-process 8033/cmii/cmii-uav-data-post-process5.4.0
2024-03-08-12-02-41 zyga cmii-admin-data 8033/cmii/cmii-admin-data5.4.0
2024-03-08-12-02-42 zyga cmii-uav-cloud-live 8033/cmii/cmii-uav-cloud-live5.4.0
2024-03-08-12-02-43 zyga cmii-uav-gateway 8033/cmii/cmii-uav-gateway5.4.0
2024-03-08-12-02-45 zyga cmii-uav-logger 8033/cmii/cmii-uav-logger5.4.0
2024-03-08-12-02-46 zyga cmii-uav-mqtthandler 8033/cmii/cmii-uav-mqtthandler5.4.0-25916
2024-03-08-12-02-47 zyga cmii-admin-user 8033/cmii/cmii-admin-user5.4.0
2024-03-08-12-02-54 zyga cmii-suav-supervision 8033/cmii/cmii-suav-supervision5.2.0
2024-03-08-12-02-55 zyga cmii-uav-developer 8033/cmii/cmii-uav-developer5.4.0
2024-03-08-12-02-57 zyga cmii-uav-integration 8033/cmii/cmii-uav-integration5.4.0-25916
2024-03-08-12-02-58 zyga cmii-open-gateway 8033/cmii/cmii-open-gateway5.4.0
2024-03-08-12-02-59 zyga cmii-uav-device 8033/cmii/cmii-uav-device5.4.0-25916
2024-03-08-14-06-05 zyga cmii-uav-cloud-live 8033/cmii/cmii-uav-cloud-live5.4.0
2024-03-08-14-06-07 zyga cmii-uav-mqtthandler 8033/cmii/cmii-uav-mqtthandler5.4.0-25916
2024-03-08-14-11-26 zyga cmii-uav-industrial-portfolio 8033/cmii/cmii-uav-industrial-portfolio5.4.0-27348-1
2024-03-08-14-11-29 zyga cmii-open-gateway 8033/cmii/cmii-open-gateway5.4.0
2024-03-08-14-11-31 zyga cmii-uav-developer 8033/cmii/cmii-uav-developer5.4.0
2024-03-08-14-11-33 zyga cmii-admin-user 8033/cmii/cmii-admin-user5.4.0
2024-03-08-14-11-35 zyga cmii-uav-mqtthandler 8033/cmii/cmii-uav-mqtthandler5.4.0-25916
2024-03-08-14-11-37 zyga cmii-uav-user 8033/cmii/cmii-uav-user5.4.0
2024-03-08-14-11-40 zyga cmii-uav-airspace 8033/cmii/cmii-uav-airspace5.4.0
2024-03-08-14-11-42 zyga cmii-uav-logger 8033/cmii/cmii-uav-logger5.4.0
2024-03-08-14-11-44 zyga cmii-uav-process 8033/cmii/cmii-uav-process5.4.0
2024-03-08-14-11-48 zyga cmii-uav-notice 8033/cmii/cmii-uav-notice5.4.0
2024-03-08-14-11-55 zyga cmii-uav-surveillance 8033/cmii/cmii-uav-surveillance5.4.0-25916
2024-03-08-14-12-04 zyga cmii-uav-waypoint 8033/cmii/cmii-uav-waypoint5.4.0-26768
2024-03-08-14-12-13 zyga cmii-uav-brain 8033/cmii/cmii-uav-brain5.4.0
2024-03-08-14-12-20 zyga cmii-uav-cms 8033/cmii/cmii-uav-cms5.3.0
2024-03-08-14-12-22 zyga cmii-uav-material-warehouse 8033/cmii/cmii-uav-material-warehouse5.4.0
2024-03-08-14-12-24 zyga cmii-admin-gateway 8033/cmii/cmii-admin-gateway5.4.0
2024-03-08-14-12-26 zyga cmii-uav-emergency 8033/cmii/cmii-uav-emergency5.3.0
2024-03-08-14-12-28 zyga cmii-uav-gateway 8033/cmii/cmii-uav-gateway5.4.0
2024-03-08-14-12-31 zyga cmii-uav-integration 8033/cmii/cmii-uav-integration5.4.0-25916
2024-03-08-14-12-37 zyga cmii-admin-data 8033/cmii/cmii-admin-data5.4.0
2024-03-08-14-12-40 zyga cmii-suav-supervision 8033/cmii/cmii-suav-supervision5.2.0
2024-03-08-14-12-42 zyga cmii-uav-autowaypoint 8033/cmii/cmii-uav-autowaypoint4.1.6-cm
2024-03-08-14-12-44 zyga cmii-uav-cloud-live 8033/cmii/cmii-uav-cloud-live5.4.0
2024-03-08-14-12-46 zyga cmii-uav-mission 8033/cmii/cmii-uav-mission5.4.0-26462-0307
2024-03-08-14-12-48 zyga cmii-uav-oauth 8033/cmii/cmii-uav-oauth5.4.0
2024-03-08-14-12-50 zyga cmii-uav-alarm 8033/cmii/cmii-uav-alarm5.4.0
2024-03-08-14-12-53 zyga cmii-uav-data-post-process 8033/cmii/cmii-uav-data-post-process5.4.0
2024-03-08-14-12-55 zyga cmii-uav-device 8033/cmii/cmii-uav-device5.4.0-25916
2024-03-08-14-12-57 zyga cmii-uav-platform-cms-portal 8033/cmii/cmii-uav-platform-cms-portal5.4.0
2024-03-08-14-13-01 zyga cmii-uav-platform-detection 8033/cmii/cmii-uav-platform-detection5.4.0
2024-03-08-14-13-15 zyga cmii-uav-platform-emergency-rescue 8033/cmii/cmii-uav-platform-emergency-rescue5.2.0
2024-03-08-14-13-19 zyga cmii-uav-platform-media 8033/cmii/cmii-uav-platform-media5.4.0
2024-03-08-14-13-32 zyga cmii-uav-platform-open 8033/cmii/cmii-uav-platform-open5.4.0
2024-03-08-14-13-37 zyga cmii-uav-platform-splice 8033/cmii/cmii-uav-platform-splice5.4.0
2024-03-08-14-13-50 zyga cmii-uav-platform 8033/cmii/cmii-uav-platform5.4.0-25263
2024-03-08-14-13-54 zyga cmii-uav-platform-ai-brain 8033/cmii/cmii-uav-platform-ai-brain5.4.0
2024-03-08-14-14-08 zyga cmii-uav-platform-armypeople 8033/cmii/cmii-uav-platform-armypeople5.4.0
2024-03-08-14-14-12 zyga cmii-uav-platform-oms 8033/cmii/cmii-uav-platform-oms5.4.0
2024-03-08-14-14-26 zyga cmii-uav-platform-base 8033/cmii/cmii-uav-platform-base5.4.0
2024-03-08-14-14-30 zyga cmii-uav-platform-mws 8033/cmii/cmii-uav-platform-mws5.4.0
2024-03-08-14-14-44 zyga cmii-uav-platform-visualization 8033/cmii/cmii-uav-platform-visualization5.2.0
2024-03-08-14-14-48 zyga cmii-suav-platform-supervision 8033/cmii/cmii-suav-platform-supervision5.4.0
2024-03-08-14-15-01 zyga cmii-uav-platform-logistics 8033/cmii/cmii-uav-platform-logistics5.4.0
2024-03-08-14-15-06 zyga cmii-uav-platform-securityh5 8033/cmii/cmii-uav-platform-securityh55.4.0
2024-03-08-14-15-19 zyga cmii-suav-platform-supervisionh5 8033/cmii/cmii-suav-platform-supervisionh55.4.0
2024-03-08-14-15-23 zyga cmii-uav-platform-security 8033/cmii/cmii-uav-platform-security4.1.6
2024-03-08-14-15-37 zyga cmii-uav-platform-seniclive 8033/cmii/cmii-uav-platform-seniclive5.2.0
2024-03-08-14-15-41 zyga cmii-uav-platform-share 8033/cmii/cmii-uav-platform-share5.4.0
2024-03-08-14-15-55 zyga cmii-uav-platform-multiterminal 8033/cmii/cmii-uav-platform-multiterminal5.4.0
2024-03-08-15-16-14 uavcloud-demo cmii-uav-platform 5.4.0-25263 5.4.0

@@ -9,7 +9,7 @@
 * Time: 2020.09.23
 */

package port_forwarding
package main

import (
    "io"

@@ -5,7 +5,7 @@
 * Time: 2020.08.17
 */

package port_forwarding
package main

import (
    "fmt"

@@ -7,7 +7,7 @@
 * Time: 2020.09.02
 */

package port_forwarding
package main

import (
    "errors"

@@ -5,7 +5,7 @@
 * Time: 2020.09.23
 */

package port_forwarding
package main

import (
    "net"

@@ -5,7 +5,7 @@
 * Time: 2020.09.23
 */

package port_forwarding
package main

import (
    "errors"

@@ -6,6 +6,7 @@ import io.wdd.server.beans.po.ProjectInfoPO;
import io.wdd.server.coreService.CoreProjectService;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;

@@ -23,6 +24,43 @@ public class AppFuncScheduler {
    @Resource
    CoreProjectService coreProjectService;

    /**
     * The parameters follow a fixed order: A1C2IP SUPREME N1C2IP A1C1IP A1C1JS M2D2IP KIMMY JACLOVE
     *                                        1       2       3      4      5      6     7      8
     *
     * @param projectDeployContext
     * @param projectInfoPO
     * @return
     */
    private static ArrayList<String> buildAppFuncArgs(ProjectDeployContext projectDeployContext, ProjectInfoPO projectInfoPO) {

        ArrayList<String> appFuncArgs = new ArrayList<>();
        String masterIpInV4 = projectDeployContext
                .getMasterNode()
                .getServerIpInV4();
        appFuncArgs.add(masterIpInV4);
        appFuncArgs.add(projectInfoPO.getProjectNamespace());
        appFuncArgs.add(masterIpInV4);
        // A1C1IP
        if (StringUtils.isBlank(projectDeployContext
                .getMasterNode()
                .getServerIpPbV4())) {
            appFuncArgs.add(masterIpInV4);
        } else {
            appFuncArgs.add(projectDeployContext
                    .getMasterNode()
                    .getServerIpPbV4());
        }
        appFuncArgs.add(projectInfoPO.getProjectWebServicePort());
        // M2D2IP
        appFuncArgs.add(masterIpInV4);
        appFuncArgs.add(projectInfoPO.getProjectVersion());
        // jackeyLoveFile
        appFuncArgs.add("init_5.1.0.tar");

        return appFuncArgs;
    }

    public boolean runProcedure(ProjectDeployContext projectDeployContext) {

        // before run
@@ -41,39 +79,6 @@ public class AppFuncScheduler {
        return true;
    }

    /**
     * The parameters follow a fixed order: A1C2IP SUPREME N1C2IP A1C1IP A1C1JS M2D2IP KIMMY JACLOVE
     *                                        1       2       3      4      5      6     7      8
     *
     * @param projectDeployContext
     * @param projectInfoPO
     * @return
     */
    private static ArrayList<String> buildAppFuncArgs(ProjectDeployContext projectDeployContext, ProjectInfoPO projectInfoPO) {

        ArrayList<String> appFuncArgs = new ArrayList<>();
        appFuncArgs.add(projectDeployContext
                .getMasterNode()
                .getServerIpInV4());
        appFuncArgs.add(projectInfoPO.getProjectNamespace());
        appFuncArgs.add(projectDeployContext
                .getMasterNode()
                .getServerIpInV4());
        appFuncArgs.add(projectDeployContext
                .getMasterNode()
                .getServerIpPbV4());
        appFuncArgs.add(projectInfoPO.getProjectWebServicePort());
        // M2D2IP
        appFuncArgs.add(projectDeployContext
                .getMasterNode()
                .getServerIpInV4());
        appFuncArgs.add(projectInfoPO.getProjectVersion());
        // jackeyLoveFile
        appFuncArgs.add("init_5.1.0.tar");

        return appFuncArgs;
    }

    private void beforeRunProcedure(ProjectDeployContext projectDeployContext) {

        // check that the deploy context meets the requirements
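On the agent side these values arrive as a plain positional argument list, so the fixed order in the Javadoc above is the whole contract. A hedged Go sketch of how a consumer might name those positions follows; the indices mirror the comment (A1C2IP SUPREME N1C2IP A1C1IP A1C1JS M2D2IP KIMMY JACLOVE), but the struct, function name, and sample values are illustrative only and are not taken from the repository.

package main

import (
    "errors"
    "fmt"
)

// appFuncArgs gives names to the positional arguments built by buildAppFuncArgs.
type appFuncArgs struct {
    A1C2IP  string // master internal IPv4
    Supreme string // project namespace
    N1C2IP  string // master internal IPv4
    A1C1IP  string // master public IPv4, falling back to the internal one
    A1C1JS  string // project web service port
    M2D2IP  string // master internal IPv4
    Kimmy   string // project version
    Jaclove string // init archive, e.g. "init_5.1.0.tar"
}

func parseAppFuncArgs(funcArgs []string) (appFuncArgs, error) {
    if len(funcArgs) < 8 {
        return appFuncArgs{}, errors.New("expect 8 positional args")
    }
    return appFuncArgs{
        A1C2IP: funcArgs[0], Supreme: funcArgs[1], N1C2IP: funcArgs[2], A1C1IP: funcArgs[3],
        A1C1JS: funcArgs[4], M2D2IP: funcArgs[5], Kimmy: funcArgs[6], Jaclove: funcArgs[7],
    }, nil
}

func main() {
    // Example values only.
    args, err := parseAppFuncArgs([]string{
        "10.0.0.1", "uavcloud-demo", "10.0.0.1", "203.0.113.10",
        "30080", "10.0.0.1", "5.4.0", "init_5.1.0.tar",
    })
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(args.A1C1IP + ":" + args.A1C1JS)
}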
@@ -36,30 +36,36 @@ import static io.wdd.common.config.OctopusObjectMapperConfig.OctopusObjectMapper
@Slf4j(topic = "octopus agent init ")
public class AcceptAgentInitInfo {

    public static final HashMap<String, Integer> ALL_SERVER_CITY_INDEX = new HashMap<>(
            Map.of(
                    "Chengdu", 1,
                    "Shanghai", 2,
                    "HongKong", 3,
                    "Seoul", 4,
                    "Tokyo", 5,
                    "Phoenix", 6,
                    "London", 7,
                    "LosAngeles", 8,
                    "Beijing", 9,
                    "Chongqing", 10
            )
    );
    public static final HashMap<String, Integer> ALL_SERVER_CITY_INDEX = new HashMap<>() {{
        put("Chengdu", 1);
        put("Shanghai", 2);
        put("HongKong", 3);
        put("Seoul", 4);
        put("Tokyo", 5);
        put("Phoenix", 6);
        put("London", 7);
        put("LosAngeles", 8);
        put("Beijing", 9);
        put("Chongqing", 10);
        put("Ziyang", 11);

    }};


    public static Set<String> ALL_SERVER_ARCH_INFO = new HashSet<>(
            Arrays.asList(
                    "amd64",

@@ -46,13 +46,17 @@ public class TestBaseFuncScheduler {

        // Long projectServerId = 1751084188582440961L;// cqga
        // Long projectServerId = 1722453318596550657L;// lappro
        Long projectServerId = 1752602668144975873L;// cqlyj
        // Long projectServerId = 1752602668144975873L;// cqlyj
        // Long projectServerId = 1764575895186030593L;// Shanghai radar-video linkage
        // Long projectServerId = 1765290465066692610L;// Jilin Mobile
        Long projectServerId = 1765654823726669826L;// Ziyang Public Security

        projectDeployContext.setProjectId(projectServerId);


        String masterNodeServerName = "Chongqing-amd64-01"; // cgga
        // String masterNodeServerName = "Chengdu-amd64-99"; // lap pro
        // String masterNodeServerName = "Shanghai-amd64-09"; // cgga
        // String masterNodeServerName = "Chengdu-amd64-51"; // lap pro
        String masterNodeServerName = "Ziyang-amd64-61"; // Ziyang Public Security

        ProjectServerVO projectServerVO = coreProjectServerService.projectServerOne(projectServerId);
        Map<Boolean, List<ServerInfoPO>> collect = projectServerVO.getBindingServerList().stream().collect(
@@ -63,7 +67,7 @@ public class TestBaseFuncScheduler {


        if (collect.get(Boolean.TRUE) == null) {
            System.out.printf("project of %s master server of %s is empty", projectServerVO, masterNodeServerName);
            System.out.printf("project of %s master server of %s is empty\n", projectServerVO, masterNodeServerName);
            return;
        }
        projectDeployContext.setMasterNode(collect.get(Boolean.TRUE).get(0));
@@ -73,22 +77,23 @@ public class TestBaseFuncScheduler {

        List<BaseFunctionEnum> masterNodeProcedure = List.of(
                // BaseFunctionEnum.DISABLE_SWAP,
                // BaseFunctionEnum.DISABLE_SELINUX,
                // BaseFunctionEnum.SHUTDOWN_FIREWALL,
                // BaseFunctionEnum.INSTALL_DOCKER,
                // BaseFunctionEnum.INSTALL_DEFAULT_SSH_KEY,
                // BaseFunctionEnum.INSTALL_DOCKER_COMPOSE,
                // BaseFunctionEnum.MODIFY_DOCKER_CONFIG,
                // BaseFunctionEnum.MODIFY_DOCKER_CONFIG
                // BaseFunctionEnum.INSTALL_HARBOR
                BaseFunctionEnum.INSTALL_DEFAULT_SSH_KEY
        );

        List<BaseFunctionEnum> agentNodeProcedure = List.of(
                // BaseFunctionEnum.DISABLE_SWAP,
                // BaseFunctionEnum.SHUTDOWN_FIREWALL,
                // BaseFunctionEnum.INSTALL_DOCKER,
                // BaseFunctionEnum.INSTALL_DOCKER_COMPOSE
                // BaseFunctionEnum.MODIFY_DOCKER_CONFIG
                // BaseFunctionEnum.INSTALL_DEFAULT_SSH_KEY
                BaseFunctionEnum.DISABLE_SWAP,
                BaseFunctionEnum.INSTALL_DEFAULT_SSH_KEY,
                BaseFunctionEnum.DISABLE_SELINUX,
                BaseFunctionEnum.SHUTDOWN_FIREWALL,
                BaseFunctionEnum.INSTALL_DOCKER,
                BaseFunctionEnum.INSTALL_DOCKER_COMPOSE,
                BaseFunctionEnum.MODIFY_DOCKER_CONFIG
        );

        projectDeployContext.setMasterNodeBaseProcedure(masterNodeProcedure);
@@ -115,18 +120,18 @@ public class TestBaseFuncScheduler {
        List<AppFunctionEnum> appFunctionEnumList = List.of(
                // AppFunctionEnum.DEPLOY_CHRONY_SERVER,
                // AppFunctionEnum.DEPLOY_RKE
                // AppFunctionEnum.DEPLOY_K8S_DASHBOARD
                // AppFunctionEnum.DEPLOY_K8S_DASHBOARD,
                // AppFunctionEnum.DEPLOY_NFS,
                // AppFunctionEnum.DEPLOY_TEST_NFS,
                // AppFunctionEnum.DEPLOY_TEST_NFS
                // AppFunctionEnum.DEPLOY_K8S_NAMESPACE,
                // AppFunctionEnum.DEPLOY_K8S_PVC,
                // AppFunctionEnum.DEPLOY_K8S_MYSQL,
                // AppFunctionEnum.DEPLOY_K8S_REDIS,
                // AppFunctionEnum.DEPLOY_K8S_MIDDLEWARES
                // AppFunctionEnum.DEPLOY_INGRESS
                // AppFunctionEnum.DEPLOY_INGRESS,
                // AppFunctionEnum.DEPLOY_FRONTEND
                AppFunctionEnum.DEPLOY_BACKEND
                // AppFunctionEnum.DEPLOY_K8S_SRS
                // AppFunctionEnum.DEPLOY_BACKEND
                AppFunctionEnum.DEPLOY_K8S_SRS

        );
        projectDeployContext.setMasterAppProcedure(appFunctionEnumList);
@@ -138,7 +143,7 @@ public class TestBaseFuncScheduler {
        agentNodeProcedure = List.of(BaseFunctionEnum.CHRONY_TO_MASTER);

        projectDeployContext.setMasterNodeBaseProcedure(masterNodeProcedure);
        // projectDeployContext.setAgentNodeBaseProcedure(agentNodeProcedure);
        projectDeployContext.setAgentNodeBaseProcedure(agentNodeProcedure);

        // baseFuncScheduler.runProcedure(projectDeployContext);
    }

@@ -1,4 +1,4 @@
package socks5
package main

import (
    "errors"

2
socks5_txthinking/build_all.ps1
Normal file
@@ -0,0 +1,2 @@
C:\Users\wddsh\go\bin\gox.exe -osarch="linux/amd64 linux/arm64" -output "build/socks5_{{.OS}}_{{.Arch}}"
C:\Users\wddsh\go\bin\gox.exe -osarch="windows/amd64" -output "build/socks5_win64"
@@ -1,4 +1,4 @@
package socks5
package main

import (
    "errors"

@@ -1,4 +1,4 @@
package socks5
package main

import (
    "errors"

@@ -1,4 +1,4 @@
package socks5
package main

import (
    "io"

@@ -1,4 +1,4 @@
package socks5
package main

import (
    "encoding/hex"
@@ -6,23 +6,28 @@ import (
    "log"
    "net"
    "net/http"
    "testing"

    "github.com/miekg/dns"
)

func ExampleServer() {
    s, err := NewClassicServer("127.0.0.1:1080", "127.0.0.1", "", "", 0, 60)
func TestNewClassicServer(t *testing.T) {
    s, err := NewClassicServer(":9997", "0.0.0.0", "", "", 0, 60)
    if err != nil {
        log.Println(err)
        return
    }
    // You can pass in custom Handler
    s.ListenAndServe(nil)
    err = s.ListenAndServe(nil)
    if err != nil {
        log.Println(err)
        return
    }
    // #Output:
}

func ExampleClient_tcp() {
    go ExampleServer()
    //go TestNewClassicServer()
    c, err := NewClient("127.0.0.1:1080", "", "", 0, 60)
    if err != nil {
        log.Println(err)
@@ -51,7 +56,7 @@ func ExampleClient_tcp() {
}

func ExampleClient_udp() {
    go ExampleServer()
    //go ExampleServer()
    c, err := NewClient("127.0.0.1:1080", "", "", 0, 60)
    if err != nil {
        log.Println(err)

@@ -6,3 +6,10 @@ require (
    github.com/miekg/dns v1.1.51
    github.com/patrickmn/go-cache v2.1.0+incompatible
)

require (
    golang.org/x/mod v0.7.0 // indirect
    golang.org/x/net v0.2.0 // indirect
    golang.org/x/sys v0.2.0 // indirect
    golang.org/x/tools v0.3.0 // indirect
)

@@ -2,8 +2,6 @@ github.com/miekg/dns v1.1.51 h1:0+Xg7vObnhrz/4ZCZcZh7zPXlmU0aveS2HDBd0m0qSo=
github.com/miekg/dns v1.1.51/go.mod h1:2Z9d3CP1LQWihRZUf29mQ19yDThaI4DAYzte2CaQW5c=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/txthinking/runnergroup v0.0.0-20210608031112-152c7c4432bf h1:7PflaKRtU4np/epFxRXlFhlzLXZzKFrH5/I4so5Ove0=
github.com/txthinking/runnergroup v0.0.0-20210608031112-152c7c4432bf/go.mod h1:CLUSJbazqETbaR+i0YAhXBICV9TrKH93pziccMhmhpM=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=

@@ -1,4 +1,4 @@
package socks5
package main

import (
    "net"

@@ -1,5 +1,38 @@
package socks5
package main

import (
    "fmt"
    "log"
    "os"
)

func main() {
    if len(os.Args) < 2 {
        fmt.Println("start socks5 server error must provide listen port !")
        return
    }
    if len(os.Args) > 4 {
        fmt.Println("start socks5 server error: too many arguments !")
        return
    }
    username := ""
    password := ""
    if len(os.Args) == 4 {
        username = os.Args[2]
        password = os.Args[3]
    }

    port := os.Args[1]

    s, err := NewClassicServer(":"+port, "0.0.0.0", username, password, 0, 60)
    if err != nil {
        log.Println(err)
        return
    }
    // You can pass in custom Handler
    err = s.ListenAndServe(nil)
    if err != nil {
        log.Println(err)
        return
    }
}

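The main function above expects the listen port as the first argument, with an optional username and password pair. Below is a hedged sketch of launching the cross-compiled binary from Go with os/exec, assuming the build output name produced by build_all.ps1; this supervisor wrapper is not part of the repository.

package main

import (
    "fmt"
    "os"
    "os/exec"
)

func main() {
    // Usage of the proxy binary: ./socks5_linux_amd64 <port> [username password]
    cmd := exec.Command("./socks5_linux_amd64", "9997")
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    if err := cmd.Start(); err != nil {
        fmt.Println("start socks5 server failed:", err)
        return
    }
    fmt.Println("socks5 server started, pid", cmd.Process.Pid)
    // Wait blocks until the proxy exits; a real supervisor could run this in a goroutine.
    if err := cmd.Wait(); err != nil {
        fmt.Println("socks5 server exited:", err)
    }
}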
@@ -1,4 +1,4 @@
package main
package old_tcp_tailscale

import (
    "fmt"

@@ -1,4 +1,4 @@
package socks5
package main

import (
    "sync"

@@ -1,4 +1,4 @@
package socks5
package main

import (
    "errors"
@@ -241,6 +241,7 @@ func (s *Server) ListenAndServe(h Handler) error {
            return s.UDPConn.Close()
        },
    })

    return s.RunnerGroup.Wait()
}


@@ -1,4 +1,4 @@
package socks5
package main

import (
    "errors"

@@ -1,4 +1,4 @@
package socks5
package main

const (
    // Ver is socks protocol version

@@ -1,4 +1,4 @@
package socks5
package main

import (
    "bytes"

@@ -1,4 +1,4 @@
package socks5
package main

import (
    "bytes"

@@ -1,4 +1,4 @@
package socks5
package main

import "testing"
