This commit is contained in:
zeaslity
2025-03-14 13:48:54 +08:00
parent 77cafaf0a1
commit d8e2c67e36
38 changed files with 1051 additions and 39 deletions

View File

@@ -0,0 +1,29 @@
关于中移凌云无法实现一键部署的相关原因说明
1. 系统固有原因:
1.1 系统复杂,微服务数量众多,涉及到各服务之间配置及依赖关系复杂,无法通过脚本处理
1.2 基础设施复杂,涉及多主机k8s集群部署等,部署流程链路长,故障点可能发生在任何意想不到的地方,比如涉及到客户服务器环境、网络条件等客观情况。
1.3 涉及跨主机之间的流程编排,实现一键部署的难度非常大
2. 客观原因
2.1 客户本地化部署环境完全不可控
2.1.1 即使操作系统相同,由于版本的不同,就会导致部署依赖的文件不同
2.1.2 客户网络底层架构不同,会导致k8s跨主机的网络插件产生不可预知的异常情况,必须人工介入排查
2.1.3 即使对于不同操作系统不同版本 都做了相应的适配工作,实际情况是,仍然产生意料之外的异常,需要人工介入
3. 与竞品比较
3.1 大疆同类产品也绝无一键部署之宣传
3.2 微服务架构,暂未听说过能够实现一键部署的先例
4. 工作重心原因
4.1 低空经济中心的主要职责在于打磨优秀的产品,无法将巨量的时间用于进行交付流程的优化工作
4.2 希望技术支撑的交付团队,能够深感工作之重要,自行探索打造快速交付部署的“一键部署”流程
5. 已有部署流程优化工作
5.1 针对部署流程中固化的步骤,我们已经努力实现了部分的部署流程一键化工作
5.2 docker的离线安装流程-可以实现屏蔽操作系统差异,支持国产化环境部署等
5.3 k8s集群安装流程-可以实现一键启动k8s集群
5.4 harbor安装流程-可以实现一键安装启动harbor
5.5 MySQL Redis Mongo Emqx RabbitMQ - 可以实现一键运行启动中间件的工作

View File

@@ -4,7 +4,7 @@
rm -f /usr/local/bin/agent-wdd
rm -f /usr/local/bin/test-shell.sh
wget https://pan.107421.xyz/d/oracle-seoul-2/agent-wdd_linux_amd64 -qO /usr/local/bin/agent-wdd
wget https://pan.107421.xyz/d/oracle-seoul-2/agent-wdd_linux_amd64 -O /usr/local/bin/agent-wdd
chmod +x /usr/local/bin/agent-wdd
@@ -17,9 +17,9 @@ export oss_url_prefix=https://oss.demo.uavcmlc.com/cmlc-installation/downloadfil
# export oss_url_prefix=http://42.192.52.227:9000/octopus
wget ${oss_url_prefix}/docker-amd64-20.10.15.tgz
wget ${oss_url_prefix}/docker-compose-linux-x86_64-v2.18.0
wget ${oss_url_prefix}/docker-compose-v2.18.0-linux-amd64
wget ${oss_url_prefix/harbor-offline-installer-v2.9.0.tgz
wget ${oss_url_prefix}/harbor-offline-installer-v2.9.0.tgz
wget ${oss_url_prefix}/rke_linux-amd64
wget ${oss_url_prefix}/kubectl-1.20.4-amd64

View File

@@ -15,6 +15,9 @@ cat /usr/local/etc/wdd/agent-wdd-config.yaml
/usr/local/bin/agent-wdd base selinux
/usr/local/bin/agent-wdd base sysconfig
/usr/local/bin/agent-wdd zsh
# 首先需要下载所有的依赖!
/usr/local/bin/agent-wdd base docker local
/usr/local/bin/agent-wdd base dockercompose local
@@ -22,15 +25,26 @@ cat /usr/local/etc/wdd/agent-wdd-config.yaml
# 仅在主节点执行
/usr/local/bin/agent-wdd base docker config
/usr/local/bin/agent-wdd base harbor install
# 批量执行命令
host_list=(
172.16.100.50
172.16.100.56
172.16.100.57
172.16.100.58
172.16.100.61
)
# separator: second batch of hosts below ('//' is not a shell comment — it
# would be executed as a command and fail with "Is a directory")
host_list=(
172.16.100.62
172.16.100.51
172.16.100.52
172.16.100.53
172.16.100.54
172.16.100.55
172.16.100.56
172.16.100.57
172.16.100.58
@@ -40,20 +54,47 @@ host_list=(
)
# For each host in host_list: push the agent-wdd binary and run its ssh
# setup subcommands on the remote host (assumes passwordless root ssh to
# every host already works — TODO confirm).
for server in "${host_list[@]}";do
echo "current ip is $server"
# connectivity smoke test (disabled)
# ssh root@${server} "echo yes"
# copy the agent binary to the remote host
scp /usr/local/bin/agent-wdd root@${server}:/usr/local/bin/agent-wdd
ssh root@${server} "/usr/local/bin/agent-wdd base ssh config"
ssh root@${server} "/usr/local/bin/agent-wdd base ssh key"
# NOTE(review): presumably a reachability probe against 172.24.65.135;
# its output is discarded — verify whether this step is still needed
ssh root@${server} "curl -s http://172.24.65.135"
echo ""
done
# 复制 同步文件
export server=172.16.100.62
scp /usr/local/bin/agent-wdd root@${server}:/usr/local/bin/agent-wdd
ssh root@${server} "/usr/local/bin/agent-wdd base ssh config"
ssh root@${server} "/usr/local/bin/agent-wdd base ssh key"
# 磁盘初始化
ssh root@${server} "mkdir /root/wdd"
scp /root/wdd/docker-amd64-20.10.15.tgz root@${server}:/root/wdd/docker-amd64-20.10.15.tgz
scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${server}:/root/wdd/
scp /root/wdd/disk.sh root@${server}:/root/wdd/
ssh root@${server} "bash /root/wdd/disk.sh"
# 复制文件-docker
scp /root/wdd/docker-amd64-20.10.15.tgz root@${server}:/root/wdd/docker-amd64-20.10.15.tgz
scp /root/wdd/docker-compose-v2.18.0-linux-amd64 root@${server}:/root/wdd/
# 批量执行agent-wdd的命令
ssh root@${server} "/usr/local/bin/agent-wdd info all"
ssh root@${server} "cat /usr/local/etc/wdd/agent-wdd-config.yaml"
# ssh root@${server} "/usr/local/bin/agent-wdd base tools"
ssh root@${server} "/usr/local/bin/agent-wdd base swap"
ssh root@${server} "/usr/local/bin/agent-wdd base firewall"
ssh root@${server} "/usr/local/bin/agent-wdd base selinux"
ssh root@${server} "/usr/local/bin/agent-wdd base sysconfig"
ssh root@${server} "/usr/local/bin/agent-wdd base docker local"
ssh root@${server} "/usr/local/bin/agent-wdd base dockercompose local"
# 仅在主节点执行
/usr/local/bin/agent-wdd base docker config
# 下发docker的配置
scp /etc/docker/daemon.json root@${server}:/etc/docker/daemon.json
ssh root@${server} "cat /etc/docker/daemon.json"
ssh root@${server} "systemctl restart docker"
ssh root@${server} "docker info"
wget https://oss.demo.uavcmlc.com/cmlc-installation/tmp/nginx=1.27.0=2025-03-11=402.tar.gz && docker load < nginx=1.27.0=2025-03-11=402.tar.gz && docker run -it --rm harbor.cdcyy.com.cn/cmii/nginx:1.27.0

View File

@@ -1,6 +1,6 @@
export harbor_host=192.168.35.71:8033
export harbor_host=172.16.100.55:8033
curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"cmii","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$harbor_host/api/v2.0/projects

View File

@@ -8,7 +8,7 @@ env:
value: "eth0"
# 更加保险
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=ens160
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=ens18
# 删除所有的calico pod

View File

@@ -1,6 +1,6 @@
#!/bin/bash
namespace=jxyd
namespace=gsyd-app
# 优雅地处理Deployment缩容
scale_deployments() {

View File

@@ -1,6 +1,6 @@
#!/bin/bash
namespace=jlyd
namespace=gsyd-app
install_yq() {
wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/yq_linux_amd64 -O /usr/local/bin/yq
@@ -44,9 +44,9 @@ backup_all_stateful_sets() {
}
install_yq
# backup_all_deployment
# backup_all_service
# backup_all_stateful_sets
backup_all_deployment
backup_all_service
backup_all_stateful_sets
backup_all_configmap
# https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64

View File

@@ -5,7 +5,7 @@ wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/jq-linux-am
chmod +x /usr/local/bin/jq
export name_space=jxyd
export name_space=ingress-nginx
kubectl delete pods -n $name_space --field-selector status.phase!=Running --force
@@ -13,4 +13,6 @@ kubectl get pods -n $name_space -o json | jq -r '.items[] | select(.status.conta
kubectl -n ${name_space} delete pod helm-nacos-0 --force
kubectl -n ${name_space} logs helm-nacos-0

View File

@@ -1,7 +1,7 @@
#!/bin/bash
harbor_host=10.20.1.130:8033
namespace=jxyd
harbor_host=172.16.0.31:8033
namespace=shbj
app_name=""
new_tag=""

View File

@@ -5,7 +5,7 @@ gzip_image_list_txt="all-gzip-image-list.txt" # 一般不需要修改
oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
local_gzip_path="/root/octopus-image"
DockerRegisterDomain="192.168.10.3:8033" # 需要根据实际修改
DockerRegisterDomain="172.16.100.55:8033" # 需要根据实际修改
HarborAdminPass=V2ryStr@ngPss # 需要跟第一脚本中的密码保持一致
print_green() {
@@ -163,6 +163,6 @@ test(){
}
# test
#Download_Load_Tag_Upload "rke"
Download_Load_Tag_Upload "rke"
Load_Tag_Upload "cmii"
# Load_Tag_Upload "cmii"