[Agent][Deploy] - add zjyd

This commit is contained in:
zeaslity
2024-08-20 09:35:32 +08:00
parent 1828116d8a
commit 40b540f082
252 changed files with 21873 additions and 25482 deletions

View File

@@ -0,0 +1,114 @@
#!/bin/bash
local_host_ip=20.4.16.200
# all_host_ip_list=(20.4.13.81 20.4.13.140 20.4.13.92 20.4.13.80)
all_host_ip_list=(20.4.13.80)
pass=V2ryStr@ngPss
copy_ssh_key_to_master(){
    scp /root/.ssh/id_rsa root@20.4.13.81:/root/.ssh/id_rsa
    scp /root/.ssh/id_rsa.pub root@20.4.13.81:/root/.ssh/id_rsa.pub
}
copy_ssh_key_to_master

install_nfs_server_suffix(){
    ssh -i /root/.ssh/id_rsa root@20.4.13.81 "systemctl start rpcbind && systemctl enable rpcbind && systemctl start nfs-server && systemctl enable nfs-server"
    ssh -i /root/.ssh/id_rsa root@20.4.13.81 "mkdir -p /var/lib/docker/nfs_data && chmod 777 /var/lib/docker/nfs_data"
    ssh -i /root/.ssh/id_rsa root@20.4.13.81 "echo \"/var/lib/docker/nfs_data *(rw,no_root_squash,no_all_squash,sync)\" >> /etc/exports"
    ssh -i /root/.ssh/id_rsa root@20.4.13.81 "systemctl restart rpcbind && systemctl restart nfs-server"
    ssh -i /root/.ssh/id_rsa root@20.4.13.81 "rpcinfo -p localhost"
}
# install_nfs_server_suffix
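
# A hedged sketch (not in the original script): mount the export created by
# install_nfs_server_suffix from any other node, using the same paths and IPs.
# The mount point below is a hypothetical choice.
mount_nfs_share_on_client(){
    local client_mount_dir=/mnt/nfs_data
    showmount -e 20.4.13.81
    mkdir -p ${client_mount_dir}
    mount -t nfs 20.4.13.81:/var/lib/docker/nfs_data ${client_mount_dir}
}
# mount_nfs_share_on_client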
install_all_demand_softwares(){
    local host
    local dep_file_list=(tar-1.32-4.oe2003sp4.x86_64.rpm common_tool-openEuler-20.03-LTS-SP4.tar.gz nfs_utils-openEuler-20.03-LTS-SP4.tar.gz nginx-openEuler-20.03-LTS-SP4.tar.gz ntp-openEuler-20.03-LTS-SP4.tar.gz)
    for host in "${all_host_ip_list[@]}"
    do
        echo "current host is ${host}"
        ssh -i /root/.ssh/id_rsa root@${host} "echo yes"
        ssh -i /root/.ssh/id_rsa root@${host} "systemctl start rpcbind && systemctl enable rpcbind && systemctl start ntpd && systemctl enable ntpd"
        # ssh -i /root/.ssh/id_rsa root@${host} "mkdir -p /root/wdd/dep/"
        # for dep in "${dep_file_list[@]}"
        # do
        #     echo "dep file is ${dep}"
        #
        #     ssh -i /root/.ssh/id_rsa root@${host} "wget http://20.4.16.200:9000/octopus/euler/${dep} -O /root/wdd/dep/${dep}"
        #     ssh -i /root/.ssh/id_rsa root@${host} "rpm -ivh /root/wdd/dep/tar*.rpm"
        #     ssh -i /root/.ssh/id_rsa root@${host} "cd /root/wdd/dep/ && tar -zvxf common_tool-openEuler-20.03-LTS-SP4.tar.gz && cd ./common_tool && rpm -ivh --force ./*.rpm"
        #     ssh -i /root/.ssh/id_rsa root@${host} "cd /root/wdd/dep/ && tar -zvxf nfs_utils-openEuler-20.03-LTS-SP4.tar.gz && cd ./nfs_utils && rpm -ivh --force ./*.rpm"
        #     ssh -i /root/.ssh/id_rsa root@${host} "cd /root/wdd/dep/ && tar -zvxf ntp-openEuler-20.03-LTS-SP4.tar.gz && cd ./ntp && rpm -ivh --force ./*.rpm"
        #
        # done
        echo ""
    done
}
# install_all_demand_softwares
test_base_command_exists() {
    local base_command_list=(ifconfig mtr vgdisplay nslookup vim htop tar unzip iftop curl wget netstat git zsh)
    local command
    for command in "${base_command_list[@]}"; do
        if command -v "$command" &>/dev/null; then
            echo "$command exists"
        else
            echo "ERROR $command does not exist!"
        fi
        echo ""
    done
}
test_service_exists(){
    local base_service_list=(ntpd chronyd nginx nfs-server rpcbind docker)
    local service
    for service in "${base_service_list[@]}"; do
        if ! systemctl list-unit-files | grep -q "${service}.service"; then
            echo "ERROR ${service}.service does not exist!"
        fi
        echo ""
    done
}
# test_base_command_exists
# test_service_exists
change_host_name(){
    hostnamectl set-hostname master-node
}
install_ssh_key(){
    echo "" >> /root/.ssh/authorized_keys
    echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDL9S6CSAjTFe2fy4bAIfqu90ft6E+GBRvS59kno6LDeAbqUQNYf9hEnIg07Ee/x5DlnYE0S3Ykv3WCHuVyBH2zANnC0P87SqphDGmoqdqF3r6uDaCr4lBsUqEai9X2q6dyjZj6ym+r4zQhMApNDzbhcyfKQ54tKFylGIdx6siyktuU/VbOzWc6G8r+BfFsQpMCA1ihmCY1jGjsKPqFlZGLeTrlBb1Zk0OV+GtDhlf/t0cd0kRPJoydm2juTXrZO+tFmf9turfKZsBnRYKtQBLJG5mF1hsjIqo8DHr+PUL2wRrSxEhGTZiJL4rNJo/kHhKXXsomc5RM/AnfgAfxrLlH zeasl@DESKTOP-K2F9GG3 " >> /root/.ssh/authorized_keys
    echo "" >> /root/.ssh/authorized_keys
}
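# A hedged addition, not in the original: tighten key permissions so sshd
# accepts the key even on hosts where StrictModes stays at "yes".
fix_ssh_key_permissions(){
    chmod 700 /root/.ssh
    chmod 600 /root/.ssh/authorized_keys
}
# fix_ssh_key_permissions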
install_octopus_server_offline(){
    bash <(curl -sL http://20.4.16.200:9000/octopus/init-script-wdd.sh) --url http://20.4.16.200:9000/octopus --agent-install --offline
    bash <(curl -sL http://20.4.16.200:9000/octopus/init-script-wdd.sh) --url http://20.4.16.200:9000/octopus --agent-remove --offline
    cp /etc/ssh/sshd_config /etc/ssh/sshd_config_back
    sed -i "s/StrictModes yes/StrictModes no/g" /etc/ssh/sshd_config
    sed -i "s/AllowTcpForwarding no/AllowTcpForwarding yes/g" /etc/ssh/sshd_config
    sed -i "s/AllowAgentForwarding no/AllowAgentForwarding yes/g" /etc/ssh/sshd_config
    sed -i "s/PermitTunnel no/PermitTunnel yes/g" /etc/ssh/sshd_config
    systemctl restart sshd
}
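# A hedged addition, not in the original: validate the edited sshd_config and
# roll back to the backup taken above if a sed substitution broke it, so a bad
# edit cannot lock out SSH access.
check_sshd_config(){
    sshd -t || cp /etc/ssh/sshd_config_back /etc/ssh/sshd_config
}
# check_sshd_config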
# Machine-ID reference (host, hostname, /etc/machine-id):
# 20.4.13.81  Chongqing-amd64-01 354d6db5354d6db5354d6db5354d6db5
# 20.4.13.140 Chongqing-amd64-02 2a216db5354d6db5354d6db5354d6db5
# 20.4.13.92  Chongqing-amd64-03 3ca26db5354d6db5354d6db5354d6db5
# 20.4.13.80  Chongqing-amd64-04 4ea1d6db5354d6db5354d6db5354d6db
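# A hedged sketch (not in the original): print each host's /etc/machine-id to
# cross-check it against the reference table above.
verify_machine_ids(){
    local host
    for host in "${all_host_ip_list[@]}"; do
        echo -n "${host} -> "
        ssh -i /root/.ssh/id_rsa root@${host} "cat /etc/machine-id"
    done
}
# verify_machine_ids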

View File

@@ -0,0 +1,48 @@
#!/bin/bash
mount_disk_to_var(){
    echo ""
    echo ""
    echo ""
    echo "-----------------------------------------------------------------------"
    local VG_NAME=datavg
    local disk_name=/dev/vdb
    local mount_dir=/var/lib/docker
    # Feed fdisk its interactive answers: new (n) primary (p) partition, blank
    # lines to accept the defaults for partition number and sectors, then set
    # the type (t) to 8e (Linux LVM) and write (w).
    echo "n
p



t
8e
w
" | fdisk ${disk_name}
    partprobe
    # If a volume group already exists, extend it instead:
    # vgextend /dev/mapper/centos /dev/vda3
    vgcreate ${VG_NAME} ${disk_name}1
    export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
    # Adjust the size to the actual environment
    lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
    # resize2fs /dev/mapper/${VG_NAME}-lvdata
    mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
    mkdir -p /data
    mkdir -p ${mount_dir}
    local selffstab="/dev/mapper/${VG_NAME}-lvdata ${mount_dir} xfs defaults 0 0"
    echo "${selffstab}" >> /etc/fstab
    mount -a
    xfs_growfs /dev/mapper/${VG_NAME}-lvdata
    echo ""
    echo ""
    echo ""
    df -TH
    echo "-----------------------------------------------------------------------"
}
mount_disk_to_var
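
# A hedged sketch (not in the original): quick post-run check that the LV was
# created, registered in fstab, and mounted where expected.
verify_mount(){
    lsblk /dev/vdb
    grep datavg-lvdata /etc/fstab
    df -h /var/lib/docker
}
# verify_mount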

View File

@@ -0,0 +1,274 @@
version: '2.3'
services:
  log:
    image: goharbor/harbor-log:v2.9.0
    container_name: harbor-log
    extra_hosts:
      - "harbor.wdd.io:20.4.16.200"
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - DAC_OVERRIDE
      - SETGID
      - SETUID
    volumes:
      - /var/log/harbor/:/var/log/docker/:z
      - type: bind
        source: ./common/config/log/logrotate.conf
        target: /etc/logrotate.d/logrotate.conf
      - type: bind
        source: ./common/config/log/rsyslog_docker.conf
        target: /etc/rsyslog.d/rsyslog_docker.conf
    ports:
      - 127.0.0.1:1514:10514
    networks:
      - harbor
  registry:
    image: goharbor/registry-photon:v2.9.0
    container_name: registry
    extra_hosts:
      - "harbor.wdd.io:20.4.16.200"
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
    volumes:
      - /var/lib/docker/harbor-data/registry:/storage:z
      - ./common/config/registry/:/etc/registry/:z
      - type: bind
        source: /var/lib/docker/harbor-data/secret/registry/root.crt
        target: /etc/registry/root.crt
      - type: bind
        source: ./common/config/shared/trust-certificates
        target: /harbor_cust_cert
    networks:
      - harbor
    depends_on:
      - log
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://localhost:1514"
        tag: "registry"
  registryctl:
    image: goharbor/harbor-registryctl:v2.9.0
    container_name: registryctl
    extra_hosts:
      - "harbor.wdd.io:20.4.16.200"
    env_file:
      - ./common/config/registryctl/env
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
    volumes:
      - /var/lib/docker/harbor-data/registry:/storage:z
      - ./common/config/registry/:/etc/registry/:z
      - type: bind
        source: ./common/config/registryctl/config.yml
        target: /etc/registryctl/config.yml
      - type: bind
        source: ./common/config/shared/trust-certificates
        target: /harbor_cust_cert
    networks:
      - harbor
    depends_on:
      - log
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://localhost:1514"
        tag: "registryctl"
  postgresql:
    image: goharbor/harbor-db:v2.9.0
    container_name: harbor-db
    extra_hosts:
      - "harbor.wdd.io:20.4.16.200"
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - DAC_OVERRIDE
      - SETGID
      - SETUID
    volumes:
      - /var/lib/docker/harbor-data/database:/var/lib/postgresql/data:z
    networks:
      harbor:
    env_file:
      - ./common/config/db/env
    depends_on:
      - log
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://localhost:1514"
        tag: "postgresql"
    shm_size: '1gb'
  core:
    image: goharbor/harbor-core:v2.9.0
    container_name: harbor-core
    extra_hosts:
      - "harbor.wdd.io:20.4.16.200"
    env_file:
      - ./common/config/core/env
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - SETGID
      - SETUID
    volumes:
      - /var/lib/docker/harbor-data/ca_download/:/etc/core/ca/:z
      - /var/lib/docker/harbor-data/:/data/:z
      - ./common/config/core/certificates/:/etc/core/certificates/:z
      - type: bind
        source: ./common/config/core/app.conf
        target: /etc/core/app.conf
      - type: bind
        source: /var/lib/docker/harbor-data/secret/core/private_key.pem
        target: /etc/core/private_key.pem
      - type: bind
        source: /var/lib/docker/harbor-data/secret/keys/secretkey
        target: /etc/core/key
      - type: bind
        source: ./common/config/shared/trust-certificates
        target: /harbor_cust_cert
    networks:
      harbor:
    depends_on:
      - log
      - registry
      - redis
      - postgresql
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://localhost:1514"
        tag: "core"
  portal:
    image: goharbor/harbor-portal:v2.9.0
    container_name: harbor-portal
    extra_hosts:
      - "harbor.wdd.io:20.4.16.200"
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
      - NET_BIND_SERVICE
    volumes:
      - type: bind
        source: ./common/config/portal/nginx.conf
        target: /etc/nginx/nginx.conf
    networks:
      - harbor
    depends_on:
      - log
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://localhost:1514"
        tag: "portal"
  jobservice:
    image: goharbor/harbor-jobservice:v2.9.0
    container_name: harbor-jobservice
    extra_hosts:
      - "harbor.wdd.io:20.4.16.200"
    env_file:
      - ./common/config/jobservice/env
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
    volumes:
      - /var/lib/docker/harbor-data/job_logs:/var/log/jobs:z
      - type: bind
        source: ./common/config/jobservice/config.yml
        target: /etc/jobservice/config.yml
      - type: bind
        source: ./common/config/shared/trust-certificates
        target: /harbor_cust_cert
    networks:
      - harbor
    depends_on:
      - core
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://localhost:1514"
        tag: "jobservice"
  redis:
    image: goharbor/redis-photon:v2.9.0
    container_name: redis
    extra_hosts:
      - "harbor.wdd.io:20.4.16.200"
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
    volumes:
      - /var/lib/docker/harbor-data/redis:/var/lib/redis
    networks:
      harbor:
    depends_on:
      - log
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://localhost:1514"
        tag: "redis"
  proxy:
    image: goharbor/nginx-photon:v2.9.0
    container_name: nginx
    extra_hosts:
      - "harbor.wdd.io:20.4.16.200"
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
      - NET_BIND_SERVICE
    volumes:
      - ./common/config/nginx:/etc/nginx:z
      - type: bind
        source: ./common/config/shared/trust-certificates
        target: /harbor_cust_cert
    networks:
      - harbor
    ports:
      - 8033:8080
    depends_on:
      - registry
      - core
      - portal
      - log
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://localhost:1514"
        tag: "proxy"
networks:
  harbor:
    external: false
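
A minimal smoke test for this stack (a hedged sketch: it assumes the compose file is deployed on 20.4.13.81, where the proxy publishes 8033:8080 and the admin credentials mirror the private_registries entry in the RKE config below; Harbor ships with a default "library" project):

# mark the plain-HTTP registry as trusted, then log in and push a test image
# (this overwrites an existing /etc/docker/daemon.json; merge by hand if one exists)
echo '{ "insecure-registries": ["20.4.13.81:8033"] }' > /etc/docker/daemon.json
systemctl restart docker
docker login 20.4.13.81:8033 -u admin -p 'V2ryStr@ngPss'
docker pull busybox && docker tag busybox 20.4.13.81:8033/library/busybox:test
docker push 20.4.13.81:8033/library/busybox:test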

View File

@@ -0,0 +1,180 @@
nodes:
  - address: 20.4.13.81
    user: root
    role:
      - controlplane
      - etcd
      - worker
    internal_address: 20.4.13.81
    labels:
      ingress-deploy: true
  - address: 20.4.13.140
    user: root
    role:
      - worker
    internal_address: 20.4.13.140
  - address: 20.4.13.92
    user: root
    role:
      - worker
    internal_address: 20.4.13.92
    labels:
      mysql-deploy: 'true'
authentication:
  strategy: x509
  sans:
    - "20.4.13.81"
private_registries:
  - url: 20.4.13.81:8033 # private image registry address
    user: admin
    password: "V2ryStr@ngPss"
    is_default: true
##############################################################################
# Defaults to false; if set to true, RKE will not error out when it finds an
# unsupported Docker version
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
#ssh_key_path: /root/.ssh/id_ed25519
ssh_key_path: /root/.ssh/id_rsa
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
  etcd:
    backup_config:
      enabled: false
      interval_hours: 72
      retention: 3
      safe_timestamp: false
      timeout: 300
    creation: 12h
    extra_args:
      election-timeout: 5000
      heartbeat-interval: 500
    gid: 0
    retention: 72h
    snapshot: false
    uid: 0
  kube-api:
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-controller
    service_cluster_ip_range: 172.24.0.0/16
    # Expose a different port range for NodePort services
    service_node_port_range: 30000-40000
    always_pull_images: true
    pod_security_policy: false
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Enable audit log to stdout
      audit-log-path: "-"
      # Increase number of delete workers
      delete-collection-workers: 3
      # Set the verbosity of log output
      v: 1
  kube-controller:
    # CIDR pool used to assign IP addresses to pods in the cluster
    cluster_cidr: 172.28.0.0/16
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-api
    service_cluster_ip_range: 172.24.0.0/16
    # Add additional arguments to the kube-controller-manager
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Set the verbosity of log output
      v: 1
      # Enable RotateKubeletServerCertificate feature gate
      feature-gates: RotateKubeletServerCertificate=true
      # Enable TLS Certificates management
      # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
      cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
      cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
  kubelet:
    # Base domain for the cluster
    cluster_domain: cluster.local
    # IP address for the DNS service endpoint
    cluster_dns_server: 172.24.0.10
    # Fail if swap is on
    fail_swap_on: false
    # Optionally define additional volume binds to a service
    extra_binds:
      - "/data/minio-pv:/hostStorage" # do not modify; added for the MinIO PV
    extra_args:
      # Raise max pods from the default 110 to 122
      max-pods: 122
  scheduler:
    extra_args:
      # Set the verbosity of log output
      v: 0
  kubeproxy:
    extra_args:
      # Set the verbosity of log output
      v: 1
authorization:
  mode: rbac
addon_job_timeout: 30
# Specify network plug-in (canal, calico, flannel, weave, or none)
network:
  options:
    flannel_backend_type: vxlan
    flannel_iface: ens3
    flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
    flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
  plugin: flannel
# Specify DNS provider (coredns or kube-dns)
dns:
  provider: coredns
  nodelocal: { }
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 20%
      maxSurge: 15%
  linear_autoscaler_params:
    cores_per_replica: 0.34
    nodes_per_replica: 4
    prevent_single_point_failure: true
    min: 2
    max: 3
# Specify monitoring provider (metrics-server)
monitoring:
  provider: metrics-server
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 8
ingress:
  provider: nginx
  default_backend: true
  http_port: 0
  https_port: 0
  extra_envs:
    - name: TZ
      value: Asia/Shanghai
  node_selector:
    ingress-deploy: true
  options:
    use-forwarded-headers: "true"
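
To bring the cluster up from this config (a hedged sketch: it assumes the file is saved as cluster.yml and an RKE CLI release supporting kubernetes_version v1.20.4-rancher1-1 is on the deploy host):

rke up --config ./cluster.yml
# RKE writes a kubeconfig next to the cluster file on success
export KUBECONFIG=./kube_config_cluster.yml
kubectl get nodes -o wide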

View File

@@ -0,0 +1,45 @@
package main
//
//import (
// "wdd.io/agent-common/utils"
// cmii_operator "wdd.io/agent-operator"
//)
//
//var realConfig = `apiVersion: v1
//clusters:
//- cluster:
// certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeE1ERXhPREEyTURZeU5Gb1hEVE14TURFeE5qQTJNRFl5TkZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS2ZNCjFjTjBNUnhUTkRGdEZxcnRIZ0RPM29SV0dicmVob3VFcDJ3VUVRbU8yRUFyZDdkMUFReTJsSm9WM0RUVmhXbUwKcUFUOFcxaWRaS0x0Wm5mNjEva3JPeDd0U2lJeU4xa1ErN3NYRUhnTjVMc01EOVlKcndpUFdFY2FXdU9HVmI1aApMWDZWOTRjN0U5UlFDOENtd09iSkRCNG45ZE8zcDVlTDJHaFRpMkNrRWt3ZkRPR0tEL1IxeUNaK0tFcDRWWlplCnpwcnUzRG5zOUNqZHVOT1VBWTZzUGxjazNvdEdIVnhnRC9IRlRjUEhNbGhvUVQ4dmNDOTZwc0FtYXZPR1BZQ0YKa3RtN0VWYkZDOHN5Q1BMT3AwWWhTWHRkbGtKaC9UWHBaM0hSUWJxSzVPNXR4K1dGL05qMGJVc202ZldSMzZWQgpKQVVscUJIeFhSTzhGTFNrVHkwQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFKeWZ2T3hHVVYvT2wybGRQNnYxeWFSTkd5RVkKWkVxTmM2Y29LSklsd0VQNUxNYzdZNGFReWorZCtVTE4zYmIrOXZsZXdHamluTHRrUW5HZ1R3Q3pKTU5ZNlNJNQo2NzJGZEtQTE85Szdpalhway9qRE9FVHJWS25aMXJBTytOUVBmSVhpcXQ3Y1RyVHlaVzdKTVl3emZNa2VlTGErCnREdmY1Rm5vQTBLN2U3a0ZXNTBpN2pXcGh4RXRMNEJpNzAwNnU4NEpqTU5weVp1MzhKMjFXZkR1RjBoU0NQREgKS0x4cnZIZ0FOYzJWU1c2L3JPaVVCQjdiV0JkcWcyQUNVRWZwN0V3UGs2S1BsdGNiNTJtdFhCU2xiQ3pRWWw4UQpmNmVGRFIrbnRjeXNGbU1FMFI3M1lNSHJwR0dGdlduSDVaTmEyVEJYdHpwN2tNNkVPREE5a2R4WkI1dz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
// server: https://192.168.11.170:16443
// name: kubernetes
//contexts:
//- context:
// cluster: kubernetes
// user: kubernetes-admin
// name: kubernetes-admin@kubernetes
//current-context: kubernetes-admin@kubernetes
//kind: Config
//preferences: {}
//users:
//- name: kubernetes-admin
// user:
// client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4ekNDQWR1Z0F3SUJBZ0lKQU9SWThQZlhadWQyTUEwR0NTcUdTSWIzRFFFQkN3VUFNQlV4RXpBUkJnTlYKQkFNVENtdDFZbVZ5Ym1WMFpYTXdIaGNOTWpJd01URTRNRFl6TmpRMFdoY05Nekl3TVRFMk1EWXpOalEwV2pBMApNUmN3RlFZRFZRUUtEQTV6ZVhOMFpXMDZiV0Z6ZEdWeWN6RVpNQmNHQTFVRUF3d1FhM1ZpWlhKdVpYUmxjeTFoClpHMXBiakNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFPNTZ0ZG51M24rWUsxM3oKZmNlTzNiSmhBL2J0SGpoQXpvRnNObmZjeEY3dlRTZGczSUxySmVNVkFGbG50MHpUL2xacFBlU0ZUN25iL1g1Ygo4RjErSHA2dVR0b0hRVGJHR2VzbEprdkpFMjB3OGJ0Z3VrdlNmTnROOS9NNlFTWWkvTGlHeTZpd2kveGdBVUtKClFtVW1vZmhZSHNKMllFbXJCcExOVFhtenl2a2lUTlJZVC9iNlJJRzNiT3lIVm1Lc1cwQkNQNVZTTFJsLzErZlMKM0dCUUZ2UTNXdTdmVWlzMW9DSXhsc1k5V2VJUmpGOWJDbWtKNnZsT3BWbGlsTlA0cEtSSnl4aXNBNzExNENNWAprRGJvRFBXb2lxMktubzYveXI2L0xwMktsVVVSa1JhQklodEl5eXV2TldPbjhiTW90SUpCNWNOems4UkxYTm5TCklPZEtMVDhDQXdFQUFhTW5NQ1V3RGdZRFZSMFBBUUgvQkFRREFnV2dNQk1HQTFVZEpRUU1NQW9HQ0NzR0FRVUYKQndNQ01BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQ1lwVk9NemlGRUFta1A4S3B2ZWttR3laVGV3dzQreVhyUwo3TEpoWTdTR2pGY210ZldMSW9PWEhmWmZlZWNsN3M5Snh1SytPZlhqU0d0UU9jWXk0WHo5OVFWY2FRandJMEg5Cnc3aWJiYUw3M093RGZrRDMrdlNhME9ZRWZKSFlsNXErQXBnQVpLVWRWazMvZHpJSmhRR0V6L0UxcjdYTlNabDUKL1hOT3pwbzl0VHV2dDAxRlllV0RMN01DeWZGRHFTelpQdnNyWW81bDFiTE5yeEZHb1dvSTdUMlJzR205VXJyYwoyTy84R2hMYTkwZ2tLeE9JTEpYdlJCY2RrOUN4N01ROGFGVHBuSmtPMXJzVzUxMTFoTG5hNm9WRHhISlVrbjRkCmNhODFDV3R1Yk44dkpSYlFwVmkySTJ5K3ljZ3lrNTMzR21GQXNVS3dkdm5rVjNqTVJVbFYKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
// client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcGdJQkFBS0NBUUVBN25xMTJlN2VmNWdyWGZOOXg0N2RzbUVEOXUwZU9FRE9nV3cyZDl6RVh1OU5KMkRjCmd1c2w0eFVBV1dlM1ROUCtWbWs5NUlWUHVkdjlmbHZ3WFg0ZW5xNU8yZ2RCTnNZWjZ5VW1TOGtUYlREeHUyQzYKUzlKODIwMzM4enBCSmlMOHVJYkxxTENML0dBQlFvbENaU2FoK0ZnZXduWmdTYXNHa3MxTmViUEsrU0pNMUZoUAo5dnBFZ2JkczdJZFdZcXhiUUVJL2xWSXRHWC9YNTlMY1lGQVc5RGRhN3Q5U0t6V2dJakdXeGoxWjRoR01YMXNLCmFRbnErVTZsV1dLVTAvaWtwRW5MR0t3RHZYWGdJeGVRTnVnTTlhaUtyWXFlanIvS3ZyOHVuWXFWUlJHUkZvRWkKRzBqTEs2ODFZNmZ4c3lpMGdrSGx3M09UeEV0YzJkSWc1MG90UHdJREFRQUJBb0lCQVFDdTE5YldGbFNZdGNjdAoxYVJsRi9DZ3BKSlVpcHA2WWNGRmtFSUs5UmdnQmxESnl6RkE1d2hiQ2YyOGp0Y01BKzFZQzBidWNYTDNjRHZWClZiRFB5dlRHSUVQOWhBNGpDM0RiUHR4cCtkMDlWQUlYQUI3MkVqZXFUZXE1TC8rdDV6N2tSeWV2NE9oeE95NFIKU3pNYm1BeHVXS1VNcTkrQ2cxcUpiTzRkaVYwSjg5cUtidExsclFCeDFxcHNnUjNES1VhVGVNKzVpeFYyQ1Y1bApSNDV4aU43NWRrSkpaZlY2UUV5K3V2UVd0VHk4NUN3R1U2T2hjOXA4d2s0MmFrQS9qM05FTUZiTjdDaDFKbi9RCjRhNUJpMituRUE4dGVvV2FRSzdoeU5CRENWbTFsamFjaFFveGRSNGhCWVUxdkhTbkt4a0c4bDA1K1BpRTZmZFkKaUtyemhGR0JBb0dCQVBwOStKTExzZXJ6dFQ4a2VLU2FSMXBMOHB5MTQ3cmdjdEVhckxJL2ZqY1VMU3c3OUk3UAovWWhIWnhmdm9TZEZ2QTZwNy81eHFCRitaNTM5L1NKNDlLeWFOdGNJbW01UTZKSW9aRGgzWmVVS3lMKzA1YTdRCkNqMU1wZ2hKMlZDT2VPamNxd0NVQkFhcjNWSjd0cXRxRVFCQk9jMnlWU3dzbU5wclMyYmU1S3RCQW9HQkFQTzUKSG9ZVTBMK2tzdzJKUVM5ODF1ZWtrbDMzR1ZWQ2dPUFdGWThhR3VGRGt3Sm84WGk2TmhtKzh2TjlNaGg3WkYzeQpTU3E1U2RJd01pR0IvKzVJaWp1V25DbWszY2RPdGU0VFBsZHFvdjc3Q1FUUmxPNWJCekR0L1VqYVBBam5GS0FpClg4K0V6NUVXOXFSclN2ZXplZHFDRVRBVDhRWThqNk1WY0VCRW96aC9Bb0dCQUphcVRHZ25RdVdhSHF0VENZbWcKRGtqZW81Zmt3NHcwMG5xNWU2UmZFbENZdnk3N0JQY2RYVmFwOC9WdXVkVEFXZ1BMN1VGekpXOFlROFRBNzQvYgpodmVHYm5QYWhlRFNvNEM5OE1JUjl1VFVIcmxJV2xwU1ljWkxJeGFiTEs0S2MrbEVTVXE0dk04eWNwWFpPWjlTCjFkVDhab00xdjRzcGErcjhYRWNNekNmQkFvR0JBSXVuaXI4SDFHbk1CVEYvY1pPMWRDczkyUVR3MzFwRWhqaUgKWnNrZUMwTURCb3o5OTBmWFk4S3k4T0htM2pxN0VkTG5UMWVrM3BFTFB0NkdjRkZvelpUQmczQTFZVU9nYlkwagpCN2p0aU1LVXRDRkh1cEF1SnR1NXMwWDRqeWdHeVlITTBKdkhuV3lrL09WUCthQWYvblhmeTl1QndiMXlIRmcxCm82R2Y4dXNmQW9HQkFKeGlQcGdDODJPckoxazE3V3dyOFI2ZXpxR2VYb0JPRzFlOEN6ZG1UbWFrN3prWDJJelEKSTVjT3dGaTlnREhTbUVMa0dYZnRHZ01EcXF1VHVLdS9OdW9DQS94Z2FrdTQvVHplNktqbzRjc0NDTmFza3VrRQozYnhwSnU5cElYRU5tMXVuNXBZRy90QTF0V1Rtc3dnRjY1anc2RFpTQUFUTFZMSXg3RVRDR0RlOQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=`
//
//func main() {
// k8sOperator := cmii_operator.CmiiK8sOperator{}
// k8sOperator.BuildCurrentClientFromConfig(realConfig)
// realNamespace := "ingress-nginx"
//
// // get all pods
// allInterface := k8sOperator.PodAllInterface(realNamespace)
//
// for _, deploymentInterface := range allInterface {
// utils.BeautifulPrint(deploymentInterface)
// }
//
// // restart all backend
// cmii_operator.RestartCmiiBackendDeployment(realNamespace)
// cmii_operator.RestartCmiiFrontendDeployment(realNamespace)
//
//}

View File

@@ -0,0 +1,2 @@
projectId 1751084188582440961