[Agent][Deploy] - Modify the ImageSync module (extensive changes)

zeaslity
2024-08-21 13:52:49 +08:00
parent e26b7a7a00
commit ed6754e3d5
11 changed files with 346 additions and 219 deletions

View File

@@ -100,8 +100,11 @@ func ImageFullNameToGzipFileName(imageFullName string) (gzipFileName string) {
if len(first) == 3 {
// harbor.cdcyy.cn/cmii/cmii-uav-platform:5.4.0
// docker.io/ossr/srs:v5.0.1
// docker.107421.xyz/rancher/calico-cni:v3.17.2
if strings.HasPrefix(split[0], CmiiHarborPrefix) {
gzipFileName += "cmlc=cmii="
} else if strings.Contains(split[0], "rancher") {
gzipFileName += "docker=rancher="
} else {
gzipFileName += "docker=cmii="
}
@@ -111,6 +114,7 @@ func ImageFullNameToGzipFileName(imageFullName string) (gzipFileName string) {
} else if len(first) == 4 {
// harbor.cdcyy.cn/cmii/ossr/srs:v5.0.1
// harbor.cdcyy.com.cn/cmii/cmlc-ai/cmlc-ai-operator:v5.2.0-t4-no-dino
if !strings.HasPrefix(split[0], CmiiHarborPrefix) {
return imageFullName
}
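
For orientation, the registry-prefix selection added in this hunk can be read as the standalone sketch below. It is illustrative only: the value of CmiiHarborPrefix and the rest of the gzip file-name assembly are assumptions, and "repoPart" stands for everything before the image tag, as in the surrounding code.

package main

import (
	"fmt"
	"strings"
)

// CmiiHarborPrefix is assumed here for illustration; the real constant lives in the package.
const CmiiHarborPrefix = "harbor.cdcyy"

// registryPrefixToken mirrors the branch added in the hunk above: it inspects the
// repository part of an image reference and returns the token that the generated
// gzip file name starts with.
func registryPrefixToken(repoPart string) string {
	switch {
	case strings.HasPrefix(repoPart, CmiiHarborPrefix):
		return "cmlc=cmii="
	case strings.Contains(repoPart, "rancher"):
		return "docker=rancher="
	default:
		return "docker=cmii="
	}
}

func main() {
	fmt.Println(registryPrefixToken("harbor.cdcyy.cn/cmii/cmii-uav-platform")) // cmlc=cmii=
	fmt.Println(registryPrefixToken("docker.107421.xyz/rancher/calico-cni"))   // docker=rancher=
	fmt.Println(registryPrefixToken("docker.io/ossr/srs"))                     // docker=cmii=
}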

View File

@@ -12,17 +12,18 @@ import (
)
var imageFullNameList = []string{
"bitnami/redis:6.2.6-debian-10-r0",
"simonrupf/chronyd:0.4.3",
"harbor.cdcyy.com.cn/cmii/cmii-rtsp-operator:v4.1.0",
"harbor.cdcyy.com.cn/cmii/ossrs/srs:v4.0.136",
"ossrs/srs:v4.0.136",
"mongo:5.0",
"bitnami/minio:2023.5.4",
"busybox:latest",
"busybox",
"rancher/rancher:v2.7.0",
"10.1.1.1:8033/cmii/ok:1.2",
//"bitnami/redis:6.2.6-debian-10-r0",
//"simonrupf/chronyd:0.4.3",
//"harbor.cdcyy.com.cn/cmii/cmii-rtsp-operator:v4.1.0",
//"harbor.cdcyy.com.cn/cmii/ossrs/srs:v4.0.136",
//"ossrs/srs:v4.0.136",
//"mongo:5.0",
//"bitnami/minio:2023.5.4",
//"busybox:latest",
//"busybox",
//"rancher/rancher:v2.7.0",
//"10.1.1.1:8033/cmii/ok:1.2",
"docker.107421.xyz/rancher/shell:v0.1.6",
}
func TestImageFullNameToGzipFileName(t *testing.T) {

View File

@@ -68,6 +68,33 @@ func AppendContentToFile(content string, targetFile string) bool {
return true
}
func AppendOverwriteListContentToFile(contentList []string, targetFile string) bool {
err := os.Remove(targetFile)
if err != nil {
log.WarnF("[AppendOverwriteListContentToFile] - Error removing file: %s , error is %s", targetFile, err.Error())
}
// Open the file for appending. If the file does not exist, a new one will be created.
file, err := os.OpenFile(targetFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.ErrorF("[AppendOverwriteListContentToFile] - Error opening file: %s , error is %s", targetFile, err.Error())
return false
}
defer file.Close() // ensure the file is eventually closed
// write the content lines to the file
for _, contentLine := range contentList {
//bytes, _ := json.Marshal(contentLine)
if _, err := file.WriteString(contentLine + "\n"); err != nil {
log.ErrorF("[AppendOverwriteListContentToFile] - Error writing to file: %s , error is %s", targetFile, err.Error())
return false
}
}
return true
}
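
A minimal, test-style usage sketch of the new helper follows. The test name is hypothetical and the package name "utils" is assumed; the helper removes any previous file, then writes one entry per line, exactly as shown above.

package utils

import (
	"path/filepath"
	"testing"
)

// TestAppendOverwriteListContentToFile_Sketch is illustrative only.
func TestAppendOverwriteListContentToFile_Sketch(t *testing.T) {
	lines := []string{"image-a.tar.gz", "image-b.tar.gz"}
	target := filepath.Join(t.TempDir(), "all_gzip_images.txt")
	if !AppendOverwriteListContentToFile(lines, target) {
		t.Fatalf("expected write to %s to succeed", target)
	}
}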
// AppendContentWithSplitLineToFile is designed specifically for k8s YAML files: it writes a separator line before each piece of content is written.
func AppendContentWithSplitLineToFile(content string, targetFile string) bool {
@@ -174,6 +201,10 @@ func ListAllFileInFolder(folderName string) ([]string, error) {
return listAllFileInFolderWithFullPath(folderName, false)
}
func ListAllFileInFolderWithFullPath(folderName string) ([]string, error) {
return listAllFileInFolderWithFullPath(folderName, true)
}
func listAllFileInFolderWithFullPath(folderName string, fullPath bool) ([]string, error) {
files := make([]string, 0)
err := filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error {

View File

@@ -131,98 +131,98 @@ var CmiiGISAppMap = map[string]string{
}
var MiddlewareAmd64 = []string{
"bitnami/redis:6.2.6-debian-10-r0",
"bitnami/redis:6.2.14-debian-11-r1",
"bitnami/mysql:8.0.35-debian-11-r1",
"bitnami/mysql:8.1.0-debian-11-r42",
"simonrupf/chronyd:0.4.3",
"bitnami/bitnami-shell:10-debian-10-r140",
"bitnami/bitnami-shell:11-debian-11-r136",
"bitnami/rabbitmq:3.9.12-debian-10-r3",
"bitnami/rabbitmq:3.11.26-debian-11-r2",
"ossrs/srs:v4.0.136",
"ossrs/srs:v5.0.195",
"ossrs/srs:v4.0-r3",
"emqx/emqx:4.4.19",
"emqx/emqx:5.5.1",
"nacos/nacos-server:v2.1.2",
"nacos/nacos-server:v2.1.2-slim",
"mongo:5.0",
"rabbitmq:3.9-management",
"bitnami/minio:2022.5.4",
"bitnami/minio:2023.5.4",
"kubernetesui/dashboard:v2.0.1",
"kubernetesui/metrics-scraper:v1.0.4",
"nginx:1.21.3",
"redis:6.0.20-alpine",
"dyrnq/nfs-subdir-external-provisioner:v4.0.2",
"jerrychina2020/rke-tools:v0.175-linux",
"jerrychina2020/rke-tools:v0.175",
"busybox:latest",
"docker.107421.xyz/bitnami/redis:6.2.6-debian-10-r0",
"docker.107421.xyz/bitnami/redis:6.2.14-debian-11-r1",
"docker.107421.xyz/bitnami/mysql:8.0.35-debian-11-r1",
"docker.107421.xyz/bitnami/mysql:8.1.0-debian-11-r42",
"docker.107421.xyz/simonrupf/chronyd:0.4.3",
"docker.107421.xyz/bitnami/bitnami-shell:10-debian-10-r140",
"docker.107421.xyz/bitnami/bitnami-shell:11-debian-11-r136",
"docker.107421.xyz/bitnami/rabbitmq:3.9.12-debian-10-r3",
"docker.107421.xyz/bitnami/rabbitmq:3.11.26-debian-11-r2",
"docker.107421.xyz/ossrs/srs:v4.0.136",
"docker.107421.xyz/ossrs/srs:v5.0.195",
"docker.107421.xyz/ossrs/srs:v4.0-r3",
"docker.107421.xyz/emqx/emqx:4.4.19",
"docker.107421.xyz/emqx/emqx:5.5.1",
"docker.107421.xyz/nacos/nacos-server:v2.1.2",
"docker.107421.xyz/nacos/nacos-server:v2.1.2-slim",
"docker.107421.xyz/library/mongo:5.0",
"docker.107421.xyz/library/rabbitmq:3.9-management",
"docker.107421.xyz/bitnami/minio:2022.5.4",
"docker.107421.xyz/bitnami/minio:2023.5.4",
"docker.107421.xyz/kubernetesui/dashboard:v2.0.1",
"docker.107421.xyz/kubernetesui/metrics-scraper:v1.0.4",
"docker.107421.xyz/library/nginx:1.21.3",
"docker.107421.xyz/library/redis:6.0.20-alpine",
"docker.107421.xyz/dyrnq/nfs-subdir-external-provisioner:v4.0.2",
"docker.107421.xyz/jerrychina2020/rke-tools:v0.175-linux",
"docker.107421.xyz/jerrychina2020/rke-tools:v0.175",
"docker.107421.xyz/library/busybox:latest",
}
var Rancher1204Amd64 = []string{
"rancher/backup-restore-operator:v1.0.3",
"rancher/calico-cni:v3.17.2",
"rancher/calico-ctl:v3.17.2",
"rancher/calico-kube-controllers:v3.17.2",
"rancher/calico-node:v3.17.2",
"rancher/calico-pod2daemon-flexvol:v3.17.2",
"rancher/cis-operator:v1.0.3",
"rancher/cluster-proportional-autoscaler:1.7.1",
"rancher/coredns-coredns:1.8.0",
"rancher/coreos-etcd:v3.4.14-rancher1",
"rancher/coreos-kube-state-metrics:v1.9.7",
"rancher/coreos-prometheus-config-reloader:v0.39.0",
"rancher/coreos-prometheus-operator:v0.39.0",
"rancher/externalip-webhook:v0.1.6",
"rancher/flannel-cni:v0.3.0-rancher6",
"rancher/coreos-flannel:v0.13.0-rancher1",
"rancher/fleet-agent:v0.3.4",
"rancher/fleet:v0.3.4",
"rancher/fluentd:v0.1.24",
"rancher/grafana-grafana:7.1.5",
"rancher/hyperkube:v1.20.4-rancher1",
"rancher/jimmidyson-configmap-reload:v0.3.0",
"rancher/k8s-dns-dnsmasq-nanny:1.15.2",
"rancher/k8s-dns-kube-dns:1.15.2",
"rancher/k8s-dns-node-cache:1.15.13",
"rancher/k8s-dns-sidecar:1.15.2",
"rancher/klipper-lb:v0.1.2",
"rancher/kube-api-auth:v0.1.4",
"rancher/kubectl:v1.20.4",
"rancher/kubernetes-external-dns:v0.7.3",
"rancher/cluster-proportional-autoscaler:1.8.1",
"rancher/library-busybox:1.31.1",
"rancher/library-busybox:1.32.1",
"rancher/library-nginx:1.19.2-alpine",
"rancher/library-traefik:1.7.19",
"rancher/local-path-provisioner:v0.0.11",
"rancher/local-path-provisioner:v0.0.14",
"rancher/local-path-provisioner:v0.0.19",
"rancher/log-aggregator:v0.1.7",
"rancher/istio-kubectl:1.5.10",
"rancher/metrics-server:v0.4.1",
"rancher/configmap-reload:v0.3.0-rancher4",
"rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
"rancher/nginx-ingress-controller:nginx-0.43.0-rancher1",
"rancher/opa-gatekeeper:v3.1.0-beta.7",
"rancher/openzipkin-zipkin:2.14.2",
"rancher/pause:3.2",
"rancher/plugins-docker:18.09",
"rancher/prom-alertmanager:v0.21.0",
"rancher/prom-node-exporter:v1.0.1",
"rancher/prom-prometheus:v2.18.2",
"rancher/prometheus-auth:v0.2.1",
"rancher/rancher-agent:v2.5.7",
"rancher/rancher-webhook:v0.1.0-beta9",
"rancher/rancher:v2.5.7",
"rancher/rke-tools:v0.1.72",
"rancher/security-scan:v0.1.14",
"rancher/security-scan:v0.2.2",
"rancher/shell:v0.1.6",
"rancher/sonobuoy-sonobuoy:v0.16.3",
"rancher/system-upgrade-controller:v0.6.2",
"docker.107421.xyz/rancher/backup-restore-operator:v1.0.3",
"docker.107421.xyz/rancher/calico-cni:v3.17.2",
"docker.107421.xyz/rancher/calico-ctl:v3.17.2",
"docker.107421.xyz/rancher/calico-kube-controllers:v3.17.2",
"docker.107421.xyz/rancher/calico-node:v3.17.2",
"docker.107421.xyz/rancher/calico-pod2daemon-flexvol:v3.17.2",
"docker.107421.xyz/rancher/cis-operator:v1.0.3",
"docker.107421.xyz/rancher/cluster-proportional-autoscaler:1.7.1",
"docker.107421.xyz/rancher/coredns-coredns:1.8.0",
"docker.107421.xyz/rancher/coreos-etcd:v3.4.14-rancher1",
"docker.107421.xyz/rancher/coreos-kube-state-metrics:v1.9.7",
"docker.107421.xyz/rancher/coreos-prometheus-config-reloader:v0.39.0",
"docker.107421.xyz/rancher/coreos-prometheus-operator:v0.39.0",
"docker.107421.xyz/rancher/externalip-webhook:v0.1.6",
"docker.107421.xyz/rancher/flannel-cni:v0.3.0-rancher6",
"docker.107421.xyz/rancher/coreos-flannel:v0.13.0-rancher1",
"docker.107421.xyz/rancher/fleet-agent:v0.3.4",
"docker.107421.xyz/rancher/fleet:v0.3.4",
"docker.107421.xyz/rancher/fluentd:v0.1.24",
"docker.107421.xyz/rancher/grafana-grafana:7.1.5",
"docker.107421.xyz/rancher/hyperkube:v1.20.4-rancher1",
"docker.107421.xyz/rancher/jimmidyson-configmap-reload:v0.3.0",
"docker.107421.xyz/rancher/k8s-dns-dnsmasq-nanny:1.15.2",
"docker.107421.xyz/rancher/k8s-dns-kube-dns:1.15.2",
"docker.107421.xyz/rancher/k8s-dns-node-cache:1.15.13",
"docker.107421.xyz/rancher/k8s-dns-sidecar:1.15.2",
"docker.107421.xyz/rancher/klipper-lb:v0.1.2",
"docker.107421.xyz/rancher/kube-api-auth:v0.1.4",
"docker.107421.xyz/rancher/kubectl:v1.20.4",
"docker.107421.xyz/rancher/kubernetes-external-dns:v0.7.3",
"docker.107421.xyz/rancher/cluster-proportional-autoscaler:1.8.1",
"docker.107421.xyz/rancher/library-busybox:1.31.1",
"docker.107421.xyz/rancher/library-busybox:1.32.1",
"docker.107421.xyz/rancher/library-nginx:1.19.2-alpine",
"docker.107421.xyz/rancher/library-traefik:1.7.19",
"docker.107421.xyz/rancher/local-path-provisioner:v0.0.11",
"docker.107421.xyz/rancher/local-path-provisioner:v0.0.14",
"docker.107421.xyz/rancher/local-path-provisioner:v0.0.19",
"docker.107421.xyz/rancher/log-aggregator:v0.1.7",
"docker.107421.xyz/rancher/istio-kubectl:1.5.10",
"docker.107421.xyz/rancher/metrics-server:v0.4.1",
"docker.107421.xyz/rancher/configmap-reload:v0.3.0-rancher4",
"docker.107421.xyz/rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
"docker.107421.xyz/rancher/nginx-ingress-controller:nginx-0.43.0-rancher1",
"docker.107421.xyz/rancher/opa-gatekeeper:v3.1.0-beta.7",
"docker.107421.xyz/rancher/openzipkin-zipkin:2.14.2",
"docker.107421.xyz/rancher/pause:3.2",
"docker.107421.xyz/rancher/plugins-docker:18.09",
"docker.107421.xyz/rancher/prom-alertmanager:v0.21.0",
"docker.107421.xyz/rancher/prom-node-exporter:v1.0.1",
"docker.107421.xyz/rancher/prom-prometheus:v2.18.2",
"docker.107421.xyz/rancher/prometheus-auth:v0.2.1",
"docker.107421.xyz/rancher/rancher-agent:v2.5.7",
"docker.107421.xyz/rancher/rancher-webhook:v0.1.0-beta9",
"docker.107421.xyz/rancher/rancher:v2.5.7",
"docker.107421.xyz/rancher/rke-tools:v0.1.72",
"docker.107421.xyz/rancher/security-scan:v0.1.14",
"docker.107421.xyz/rancher/security-scan:v0.2.2",
"docker.107421.xyz/rancher/shell:v0.1.6",
"docker.107421.xyz/rancher/sonobuoy-sonobuoy:v0.16.3",
"docker.107421.xyz/rancher/system-upgrade-controller:v0.6.2",
}
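
The docker.107421.xyz entries above mirror the originals mechanically. The sketch below (not part of this commit) shows that relationship, inferred from the data: official images with no slash in the repository part gain a "library/" segment, everything else simply gains the mirror host prefix.

package main

import (
	"fmt"
	"strings"
)

// mirrorImageRef is an illustrative sketch of the mirroring pattern in the lists above.
func mirrorImageRef(ref string) string {
	repo := ref
	if i := strings.LastIndex(ref, ":"); i > 0 {
		repo = ref[:i] // drop the tag so the tag colon is not mistaken for a path element
	}
	if !strings.Contains(repo, "/") {
		return "docker.107421.xyz/library/" + ref
	}
	return "docker.107421.xyz/" + ref
}

func main() {
	fmt.Println(mirrorImageRef("mongo:5.0"))                        // docker.107421.xyz/library/mongo:5.0
	fmt.Println(mirrorImageRef("bitnami/redis:6.2.6-debian-10-r0")) // docker.107421.xyz/bitnami/redis:6.2.6-debian-10-r0
}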
var CmiiSRSImageList = []string{

View File

@@ -253,7 +253,7 @@ func TestUpdateCmiiDeploymentImageTag(t *testing.T) {
// calculate the 20:00 target time
now := time.Now()
targetTime := time.Date(now.Year(), now.Month(), now.Day(), 16, 55, 00, 0, now.Location())
targetTime := time.Date(now.Year(), now.Month(), now.Day(), 11, 3, 00, 0, now.Location())
duration := time.Duration(0)
@@ -278,7 +278,7 @@ func TestUpdateCmiiDeploymentImageTag(t *testing.T) {
//"cmii-uav-platform-dispatchh5": "5.6.0-062401",
//"cmii-uav-data-post-process": "5.6.0-062401",
//"cmii-uav-industrial-portfolio": "5.6.0-071801",
"cmii-uav-platform": "5.7.0",
"cmii-uav-industrial-portfolio": "5.7.0-31369-yunnan-082101",
//"cmii-uav-brain": "5.5.0",
//"cmii-uav-platform": "5.6.0-071702",
//"cmii-uas-lifecycle": "5.6.0-30403-071802",

View File

@@ -186,6 +186,8 @@ func (op *MinioOperator) UploadFile(bucketNameWithSuffix, filePath, fileName str
filePath += string(separator)
}
fileName = strings.TrimPrefix(fileName, "/")
bucketNameWithSuffix = strings.TrimPrefix(bucketNameWithSuffix, "/")
oldBucketName := bucketNameWithSuffix
realFileName := fileName
@@ -205,6 +207,6 @@ func (op *MinioOperator) UploadFile(bucketNameWithSuffix, filePath, fileName str
return false
}
log.InfoF("[UploadFile] - uploaded %s of size %d", filePath+fileName, n)
log.InfoF("[UploadFile] - uploaded [%s] of size %d", filePath+realFileName, n)
return true
}

View File

@@ -87,7 +87,23 @@ type UploadResultEntity struct {
// PullFromEntityAndSyncConditionally pulls the specified images according to the ImageSyncEntity, then uploads them to the specified target machine (or to MinIO)
func (syncCondition *ImageSyncEntity) PullFromEntityAndSyncConditionally() (imageSyncResult *ImageSyncResult) {
imageSyncResult = &ImageSyncResult{}
imageSyncResult = &ImageSyncResult{
ProcedureSuccessImageList: nil,
DownloadResult: &DownloadResultEntity{
ErrorPullImageList: nil,
SuccessPullImageList: nil,
SuccessPullTxtFileLocalFullPath: "",
},
CompressResult: &CompressResultEntity{
ErrorGzipImageList: nil,
SuccessGzipImageList: nil,
GzipTxtFileLocalFullPath: "",
},
UploadResult: &UploadResultEntity{
ErrorUploadImageList: nil,
AllDownloadUrl: nil,
},
}
if (syncCondition.DownloadCondition.CmiiNameTagList == nil && syncCondition.DownloadCondition.FullNameImageList == nil) || (len(syncCondition.DownloadCondition.CmiiNameTagList) == 0 && len(syncCondition.DownloadCondition.FullNameImageList) == 0) {
// no specific images were given, so pull images based on ProjectVersion or from DEMO
@@ -101,7 +117,6 @@ func (syncCondition *ImageSyncEntity) PullFromEntityAndSyncConditionally() (imag
} else {
// fetch the DEMO images
C_DownloadCompressUploadFromDemo(syncCondition, imageSyncResult)
}
} else {
// pull images according to the list
@@ -171,11 +186,12 @@ func A_DownloadCompressUpload(syncEntity *ImageSyncEntity, syncResult *ImageSync
gzipLocalFolderPath := syncEntity.CompressCondition.GzipLocalFolder
localGzipFileListTxt := filepath.Join(gzipLocalFolderPath, AllGzipImageLocalFileName)
// Compress
if syncEntity.CompressCondition.ShouldCompressImageToGzip {
// remove file
localGzipFileListTxt := filepath.Join(gzipLocalFolderPath, AllGzipImageLocalFileName)
_ = os.Remove(localGzipFileListTxt)
// skip compressed files that already exist
@@ -225,22 +241,24 @@ func A_DownloadCompressUpload(syncEntity *ImageSyncEntity, syncResult *ImageSync
// compression succeeded
allGzipFileFullNameList = append(allGzipFileFullNameList, gzipImageFileFullPath)
syncResult.CompressResult.SuccessGzipImageList = allGzipFileFullNameList
syncResult.CompressResult.ErrorGzipImageList = errorGzipImageList
// remove failed
fullNameList = slices.DeleteFunc(fullNameList, func(imageName string) bool {
return slices.Contains(errorGzipImageList, imageName)
})
// write all gzipped file name to file
for _, gzipFileFullName := range allGzipFileFullNameList {
utils.AppendContentToFile(
strings.TrimPrefix(gzipFileFullName, gzipLocalFolderPath)+"\n",
localGzipFileListTxt,
)
}
}
syncResult.CompressResult.SuccessGzipImageList = allGzipFileFullNameList
syncResult.CompressResult.ErrorGzipImageList = errorGzipImageList
// remove failed
fullNameList = slices.DeleteFunc(fullNameList, func(imageName string) bool {
return slices.Contains(errorGzipImageList, imageName)
})
// write all gzipped file name to file
for _, gzipFileFullName := range allGzipFileFullNameList {
utils.AppendContentToFile(
strings.TrimPrefix(strings.TrimPrefix(gzipFileFullName, gzipLocalFolderPath), "/")+"\n",
localGzipFileListTxt,
)
}
} else {
// compress everything into one large archive
gzipFileName := generateMonolithicGzipFileName(syncEntity)
@@ -251,10 +269,7 @@ func A_DownloadCompressUpload(syncEntity *ImageSyncEntity, syncResult *ImageSync
}
// write all gzipped file name to file
utils.AppendContentToFile(
utils.BeautifulPrintToString(fullNameList),
localGzipFileListTxt,
)
utils.AppendOverwriteListContentToFile(fullNameList, localGzipFileListTxt)
// remove failed
fullNameList = slices.DeleteFunc(fullNameList, func(imageName string) bool {
@@ -280,6 +295,21 @@ func A_DownloadCompressUpload(syncEntity *ImageSyncEntity, syncResult *ImageSync
// get gzip file name list
log.Info("[DCU] - UPLOAD OSS START !")
if !syncEntity.CompressCondition.ShouldCompressImageToGzip {
// no compression requested, upload the existing content directly
allFileInGzipFile, err := utils.ListAllFileInFolderWithFullPath(gzipLocalFolderPath)
if err != nil {
log.ErrorF("[DCU] - list all gzip file error !")
return
}
for _, f := range allFileInGzipFile {
if strings.HasSuffix(f, "tar.gz") {
syncResult.CompressResult.SuccessGzipImageList = append(syncResult.CompressResult.SuccessGzipImageList, f)
}
}
}
var errorUploadOssGzipNameList []string
var allDownloadUrl []string
@@ -288,20 +318,27 @@ func A_DownloadCompressUpload(syncEntity *ImageSyncEntity, syncResult *ImageSync
// derive the corresponding suffix (project identifier) from the local gzip folder path
// projectName / projectVersion
projectUniqueName := strings.TrimPrefix(gzipLocalFolderPath, image.OfflineImageGzipFolderPrefix)
projectUniqueName = strings.TrimSuffix(projectUniqueName, "/")
bucketNameWithPrefix := "cmlc-installation/" + projectUniqueName
log.InfoF("gzip file location in demo oss is %s", DefaultDemoEndpoint+"/"+bucketNameWithPrefix)
// upload gzip file list txt to demo
localGzipFileListTxt := syncResult.CompressResult.GzipTxtFileLocalFullPath
if !DefaultCmiiMinioOperator.UploadToDemo(bucketNameWithPrefix, gzipLocalFolderPath, strings.TrimPrefix(localGzipFileListTxt, gzipLocalFolderPath)) {
log.ErrorF("upload of %s to demo oss error !", localGzipFileListTxt)
// upload the list of all gzip file names
if !DefaultCmiiMinioOperator.UploadToDemo(bucketNameWithPrefix, gzipLocalFolderPath, AllGzipImageLocalFileName) {
log.ErrorF("upload of %s to demo oss error !", AllGzipImageLocalFileName)
}
// upload the list of all image names
if !DefaultCmiiMinioOperator.UploadToDemo(bucketNameWithPrefix, gzipLocalFolderPath, AllCmiiImageListLocalFileName) {
log.ErrorF("upload of %s to demo oss error !", AllCmiiImageListLocalFileName)
}
log.InfoF("upload all gzip file to demo oss !")
for _, gzipFileFullName := range syncResult.CompressResult.SuccessGzipImageList {
// SaveToGzipFile returns the full path, so normalize the gzip file name here
gzipFileName := strings.TrimPrefix(gzipFileFullName, gzipLocalFolderPath)
gzipFileName = strings.TrimPrefix(gzipFileName, "/")
if !DefaultCmiiMinioOperator.UploadToDemo(bucketNameWithPrefix, gzipLocalFolderPath, gzipFileName) {
log.ErrorF("upload of %s to demo oss error !", gzipFileName)
errorUploadOssGzipNameList = append(errorUploadOssGzipNameList, gzipFileName)
@@ -315,11 +352,12 @@ func A_DownloadCompressUpload(syncEntity *ImageSyncEntity, syncResult *ImageSync
}
utils.AppendContentToFile(utils.BeautifulPrintToString(syncResult), filepath.Join(gzipLocalFolderPath, utils.TimeSplitFormatString()+".json"))
}
func generateMonolithicGzipFileName(syncEntity *ImageSyncEntity) string {
return strings.TrimPrefix(syncEntity.CompressCondition.GzipLocalFolder, image.OfflineImageGzipFolderPrefix)
return strings.TrimPrefix(syncEntity.CompressCondition.GzipLocalFolder, image.OfflineImageGzipFolderPrefix) + ".tar.gz"
}
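
A tiny worked example of the changed return value; the prefix constant's value and the folder path are hypothetical.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical values; the real constant is image.OfflineImageGzipFolderPrefix.
	const offlineImageGzipFolderPrefix = "/opt/offline-gzip/"
	gzipLocalFolder := "/opt/offline-gzip/bjyd/5.7.0"
	// Mirrors the updated generateMonolithicGzipFileName: trim the prefix, then append
	// ".tar.gz", so the monolithic archive name now carries its file extension.
	fmt.Println(strings.TrimPrefix(gzipLocalFolder, offlineImageGzipFolderPrefix) + ".tar.gz") // bjyd/5.7.0.tar.gz
}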
// A_DownloadLoadTagUpload DLTU procedure - the other ImageSync flow, which must support the bastion-host (fully offline) mode
@@ -442,14 +480,15 @@ func C_DownloadCompressUploadFromDemo(syncEntity *ImageSyncEntity, syncResult *I
}
}
syncEntity.CompressCondition.GzipLocalFolder = gzipFolderLocalPath
// get demo image version map
allCmiiImageFullNameList := buildAllCmiiImageNameListFromDemo(projectName)
// save all cmii image to file
allPullImageNameTxtFileName := filepath.Join(gzipFolderLocalPath, AllCmiiImageListLocalFileName)
utils.AppendOverwriteContentToFile(utils.BeautifulPrintToString(allCmiiImageFullNameList), allPullImageNameTxtFileName)
utils.AppendOverwriteListContentToFile(allCmiiImageFullNameList, allPullImageNameTxtFileName)
syncEntity.CompressCondition.GzipLocalFolder = gzipFolderLocalPath
syncEntity.DownloadCondition.FullNameImageList = allCmiiImageFullNameList
// save to result
syncResult.DownloadResult.SuccessPullTxtFileLocalFullPath = allPullImageNameTxtFileName
@@ -593,7 +632,7 @@ func C_DownloadCompressUploadDependency(shouldGzip bool, shouldOss bool, isRKE b
syncEntity := &ImageSyncEntity{
DownloadCondition: &DownloadEntity{
ShouldDownloadImage: false,
ShouldDownloadImage: true,
ProjectName: "",
ProjectVersion: "",
CmiiNameTagList: nil,
@@ -613,7 +652,25 @@ func C_DownloadCompressUploadDependency(shouldGzip bool, shouldOss bool, isRKE b
DirectHarborHost: "",
}
syncResult := &ImageSyncResult{}
syncResult := &ImageSyncResult{
ProcedureSuccessImageList: nil,
DownloadResult: &DownloadResultEntity{
ErrorPullImageList: nil,
SuccessPullImageList: nil,
SuccessPullTxtFileLocalFullPath: "",
},
CompressResult: &CompressResultEntity{
ErrorGzipImageList: nil,
SuccessGzipImageList: nil,
GzipTxtFileLocalFullPath: "",
},
UploadResult: &UploadResultEntity{
ErrorUploadImageList: nil,
AllDownloadUrl: nil,
},
}
utils.AppendOverwriteListContentToFile(fullImageNameList, filepath.Join(gzipFolderPrefix, AllCmiiImageListLocalFileName))
A_DownloadCompressUpload(syncEntity, syncResult)

View File

@@ -22,7 +22,7 @@ func TestFetchDependencyRepos_Middle(t *testing.T) {
func TestFetchDependencyRepos_RKE(t *testing.T) {
errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList := C_DownloadCompressUploadDependency(true, false, true)
errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList := C_DownloadCompressUploadDependency(true, true, false)
utils.BeautifulPrintListWithTitle(errorPullImageList, "errorPullImageList")
utils.BeautifulPrintListWithTitle(errorGzipImageList, "errorGzipImageList")
@@ -48,17 +48,28 @@ func TestLoadSplitDepGzipImageToTargetHarbor(t *testing.T) {
func TestPullFromEntityAndSyncConditionally(t *testing.T) {
// Create a mock sync object to test the function's behavior. Set up the mock data and expected results according to your actual needs.
sync := ImageSyncEntity{
CmiiNameTagList: []string{
//"cmii-uav-mqtthandler:5.4.0-bjdyt-052102",
DownloadCondition: &DownloadEntity{
ShouldDownloadImage: true,
ProjectName: "bjyd",
ProjectVersion: "",
CmiiNameTagList: []string{
//"cmii-uav-mqtthandler:5.4.0-bjdyt-052102",
},
FullNameImageList: nil,
DownloadAuthUserName: "",
DownloadAuthPassword: "",
},
FullNameImageList: nil,
ProjectVersion: "",
ProjectName: "bjtg",
DirectHarborHost: "harbor.wdd.io:8033",
ShouldDownloadImage: false,
ShouldCompressImageToGzip: false,
ShouldUploadToDemoMinio: false,
ShouldDirectPushToHarbor: true,
CompressCondition: &CompressEntity{
ShouldCompressImageToGzip: true,
ShouldGzipSplit: true,
GzipLocalFolder: "",
},
UploadCondition: &UploadEntity{
ShouldUploadToDemoMinio: true,
},
DirectHarborHost: "harbor.wdd.io:8033",
ShouldDirectPushToHarbor: false,
}
// Call the function and get the result. Verify that the returned result matches expectations according to your actual needs.
@@ -67,9 +78,6 @@ func TestPullFromEntityAndSyncConditionally(t *testing.T) {
utils.BeautifulPrint(result)
// Add assertions to check the function's output, for example:
if len(result.ErrorPullImageList) != 0 {
t.Errorf("Expected no error pulling images, got %v", result.ErrorPullImageList)
}
// ...other verification logic...
}
@@ -110,7 +118,15 @@ func TestConcatAndUniformCmiiImage(t *testing.T) {
func TestImageSyncEntity_PullFromEntityAndSyncConditionally(t *testing.T) {
imageSyncEntity := ImageSyncEntity{
ProjectVersion: "5.4.0",
DownloadCondition: &DownloadEntity{
ShouldDownloadImage: true,
ProjectName: "",
ProjectVersion: "5.4.0",
CmiiNameTagList: nil,
FullNameImageList: nil,
DownloadAuthUserName: "",
DownloadAuthPassword: "",
},
DirectHarborHost: "36.134.71.138",
}

View File

@@ -330,13 +330,13 @@ func PullFromFullNameList(fullImageNameList []string) (errorPullImageList []stri
}
scanner := bufio.NewScanner(pullResult)
for scanner.Scan() {
//line := scanner.Text()
line := scanner.Text()
//if strings.Contains(line, "\"status\":\"Pulling from") {
// fmt.Println(line)
//}
//if strings.Contains(line, "Status: Image is up to date for") {
// fmt.Println(line)
//}
if strings.Contains(line, "Status: Image is up to date for") {
fmt.Println(line)
}
}
fmt.Println()
}
@@ -453,7 +453,7 @@ func SaveToGzipFile(imageFullName, folderPathPrefix string) (gzipOK bool, gzipIm
}
// generate the full path name of the gzip file
gzipImageFileFullPath = folderPathPrefix + gzipImageFileFullPath
gzipImageFileFullPath = filepath.Join(folderPathPrefix, gzipImageFileFullPath)
log.InfoF("[ImageSaveToTarGZ] - start to save [%s] to [%s]", realImageTag, gzipImageFileFullPath)
@@ -491,71 +491,68 @@ func SaveToGzipFile(imageFullName, folderPathPrefix string) (gzipOK bool, gzipIm
// SaveImageListToGzipFile compresses all images in a list into a single tar.gz file
func SaveImageListToGzipFile(imageFullNames []string, folderPathPrefix string, outputFileName string) (gzipOK bool, gzipFileFullPath string, errorGzipImageList []string) {
if len(imageFullNames) == 0 {
log.Error("[SaveImagesToGzipFile] - no images provided")
return false, "", errorGzipImageList
}
// ensure the output file path exists
if err := os.MkdirAll(filepath.Dir(folderPathPrefix), os.ModePerm); err != nil {
log.ErrorF("[SaveImagesToGzipFile] - failed to create directory: %s", err)
return false, "", errorGzipImageList
}
gzipFileFullPath = filepath.Join(folderPathPrefix, outputFileName)
log.InfoF("[SaveImagesToGzipFile] - start saving images to [%s]", gzipFileFullPath)
// remove the old gzip file
if err := os.Remove(gzipFileFullPath); err != nil && !os.IsNotExist(err) {
log.ErrorF("[SaveImagesToGzipFile] - failed to remove old gzip file: %s", err)
return false, "", errorGzipImageList
}
tarFile, err := os.Create(gzipFileFullPath)
if err != nil {
log.ErrorF("[SaveImagesToGzipFile] - error creating gzip file: %s", err)
return false, "", errorGzipImageList
}
defer tarFile.Close()
gw, err := pgzip.NewWriterLevel(tarFile, pgzip.DefaultCompression)
if err != nil {
log.ErrorF("[SaveImagesToGzipFile] - pgzip writer creation error: %s", err)
return false, "", errorGzipImageList
}
defer gw.Close()
errorGzipImageList = []string{}
for _, imageFullName := range imageFullNames {
imageGetByName := GetByName(imageFullName)
if imageGetByName == nil {
log.WarnF("[SaveImagesToGzipFile] - %s not exists, skipping", imageFullName)
errorGzipImageList = append(errorGzipImageList, imageFullName)
continue
}
imageSaveTarStream, err := apiClient.ImageSave(context.TODO(), imageGetByName.RepoTags)
if err != nil {
log.ErrorF("[SaveImagesToGzipFile] - image save error for %s: %s", imageFullName, err)
errorGzipImageList = append(errorGzipImageList, imageFullName)
continue
}
if _, err := io.Copy(gw, imageSaveTarStream); err != nil {
log.ErrorF("[SaveImagesToGzipFile] - failed to copy tar archive for %s to gzip writer: %s", imageFullName, err)
errorGzipImageList = append(errorGzipImageList, imageFullName)
continue
}
}
if err := gw.Close(); err != nil {
log.ErrorF("[SaveImagesToGzipFile] - error closing gzip writer: %s", err)
return false, "", errorGzipImageList
}
log.InfoF("[SaveImagesToGzipFile] - successfully saved images to [%s]", gzipFileFullPath)
//if len(imageFullNames) == 0 {
// log.Error("[SaveImagesToGzipFile] - no images provided")
// return false, "", errorGzipImageList
//}
//
//// ensure the output file path exists
//gzipFileFullPath = folderPathPrefix + outputFileName
//if err := os.MkdirAll(filepath.Dir(gzipFileFullPath), os.ModePerm); err != nil {
// log.ErrorF("[SaveImagesToGzipFile] - failed to create directory: %s", err)
// return false, "", errorGzipImageList
//}
//
//log.InfoF("[SaveImagesToGzipFile] - start saving images to [%s]", gzipFileFullPath)
//
//// remove the old gzip file
//if err := os.Remove(gzipFileFullPath); err != nil && !os.IsNotExist(err) {
// log.ErrorF("[SaveImagesToGzipFile] - failed to remove old gzip file: %s", err)
// return false, "", errorGzipImageList
//}
//
//tarFile, err := os.Create(gzipFileFullPath)
//if err != nil {
// log.ErrorF("[SaveImagesToGzipFile] - error creating gzip file: %s", err)
// return false, "", errorGzipImageList
//}
//defer tarFile.Close()
//
//gw, err := pgzip.NewWriterLevel(tarFile, pgzip.DefaultCompression)
//if err != nil {
// log.ErrorF("[SaveImagesToGzipFile] - pgzip writer creation error: %s", err)
// return false, "", errorGzipImageList
//}
//defer gw.Close()
//
//for _, imageFullName := range imageFullNames {
// imageGetByName := GetByName(imageFullName)
// if imageGetByName == nil {
// log.WarnF("[SaveImagesToGzipFile] - %s not exists, skipping", imageFullName)
// continue
// }
//
// imageSaveTarStream, err := apiClient.ImageSave(context.TODO(), imageGetByName.RepoTags)
// if err != nil {
// log.ErrorF("[SaveImagesToGzipFile] - image save error for %s: %s", imageFullName, err)
// continue
// }
//
// if _, err := io.Copy(gw, imageSaveTarStream); err != nil {
// log.ErrorF("[SaveImagesToGzipFile] - failed to copy tar archive for %s to gzip writer: %s", imageFullName, err)
// continue
// }
//
//}
//
//if err := gw.Close(); err != nil {
// log.ErrorF("[SaveImagesToGzipFile] - error closing gzip writer: %s", err)
// return false, "", errorGzipImageList
//}
//
//log.InfoF("[SaveImagesToGzipFile] - successfully saved images to [%s]", gzipFileFullPath)
return true, gzipFileFullPath, errorGzipImageList
}
func CmiiImageMapToFullNameList(cmiiImageVersionMap map[string]string) (fullImageNameList []string) {

View File

@@ -283,3 +283,20 @@ func TestImageNameToTargetImageFullName(t *testing.T) {
utils.BeautifulPrint(result)
}
func TestSaveImageListToGzipFile(t *testing.T) {
allImageList := []string{
"harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-iam-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:5.5.0",
}
gzipOK, gzipFileFullPath, errorGzipImageList := SaveImageListToGzipFile(allImageList, "/home/wdd/IdeaProjects/ProjectOctopus/agent-operator/image", "test.tar.gz")
assert.Equal(t, gzipOK, true, "gzip image list to single file failed !")
log.Info("gzip file is " + gzipFileFullPath)
utils.BeautifulPrint(errorGzipImageList)
}

View File

@@ -207,3 +207,5 @@
2024-07-18-18-30-21 uavcloud-demo cmii-uav-brain 5.5.0
2024-07-19-09-29-48 uavcloud-demo cmii-uav-platform-armypeople 5.7.0 5.7.0-29668-071901
2024-07-19-09-54-00 uavcloud-demo cmii-uav-platform-armypeople 5.7.0-29668-071901 5.7.0
2024-08-20-17-36-40 uavcloud-demo cmii-uav-industrial-portfolio 5.7.0-31369-yunnan-081901 5.7.0-31369-yunnan-082001
2024-08-21-11-03-00 uavcloud-demo cmii-uav-industrial-portfolio 5.7.0-31369-yunnan-082001 5.7.0-31369-yunnan-082101