大量更新

This commit is contained in:
zeaslity
2025-02-21 14:00:16 +08:00
parent 20c4f57715
commit c5780f90d7
85 changed files with 50718 additions and 67 deletions

View File

@@ -1,15 +1,25 @@
#!/bin/bash
# 下载agent
wget http://42.192.52.227:9000/octopus/octopus-agent_linux_amd64
mv ./octopus-agent_linux_amd64 /usr/local/bin/octopus-agent
chmod +x /usr/local/bin/octopus-agent
https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/docker-amd64-20.10.15.tgz
https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/harbor-offline-installer-v2.9.0.tgz
# 下载依赖文件
mkdir /root/wdd/
cd /root/wdd
https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/docker-compose-linux-x86_64-v2.18.0
export oss_url_prefix=https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd
# export oss_url_prefix=http://42.192.52.227:9000/octopus
https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/rke_linux-amd64
wget ${oss_url_prefix}/docker-amd64-20.10.15.tgz
wget ${oss_url_prefix}/docker-compose-linux-x86_64-v2.18.0
https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/mysql-8.0.27-linux-glibc2.17-x86_64-minimal.zip
# Fix: the original read `${oss_url_prefix/harbor-...tgz` — the closing `}` was
# missing, so bash performed a pattern substitution on oss_url_prefix instead of
# appending the path, and wget fetched a wrong URL.
wget "${oss_url_prefix}/harbor-offline-installer-v2.9.0.tgz"
wget ${oss_url_prefix}/rke_linux-amd64
wget ${oss_url_prefix}/mysql-8.0.27-linux-glibc2.17-x86_64-minimal.zip

View File

@@ -5,37 +5,45 @@ host_list=(10.129.80.218 10.129.80.245 10.129.80.222 10.129.80.223)
host_list=(10.129.80.217 10.129.80.245 10.129.80.222 10.129.80.223)
for ip in "${host_list[@]}";do
echo "current ip is $ip"
for server in "${host_list[@]}";do
echo "current ip is $server"
ssh root@${server} "curl 172.24.152.72"
done
disk
10.129.80.245
mv /root/wdd/octopus-agent_linux_amd64 /usr/local/bin/octopus-agent
wget http://42.192.52.227:9000/octopus/octopus-agent_linux_amd64
mv octopus-agent_linux_amd64 /usr/local/bin/octopus-agent
chmod +x /usr/local/bin/octopus-agent
wget http://42.192.52.227:9000/octopus/docker-amd64-20.10.15.tgz
ssh root@${server} "mkdir /root/wdd"
scp /usr/local/bin/octopus-agent root@${server}:/usr/local/bin/octopus-agent
scp /root/wdd/docker-amd64-20.10.15.tgz root@${server}:/root/wdd/docker-amd64-20.10.15.tgz
scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${server}:/root/wdd/
scp /root/wdd/nfs_client_22.04.4_amd64.tar.gz root@${server}:/root/wdd/
scp /root/wdd/nfs_server_22.04.4_amd64.tar.gz root@${server}:/root/wdd/
scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${server}:/root/wdd/
scp /root/wdd/disk.sh root@${server}:/root/wdd/
ssh root@${server} "bash /root/wdd/disk.sh && lsblk"
ssh root@${server} "chmod +x /usr/local/bin/octopus-agent"
ssh root@${server} "printf 'firewall\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'sysconfig\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'swap\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'selinux\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'sshkey\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'docker\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'dockercompose\n' | octopus-agent --mode=bastion"
printf 'dockerconfig\n' | octopus-agent --mode=bastion
scp /etc/docker/daemon.json root@${server}:/etc/docker/
ssh root@${server} "systemctl restart docker && sleep 3 && docker info"
sed -i '/^$/d' ~/.ssh/*
@@ -43,7 +51,12 @@ sed -i '/^$/d' ~/.ssh/*
chmod +x /usr/local/bin/octopus-agent
printf 'firewall\n' | octopus-agent --mode=bastion
printf 'sysconfig\n' | octopus-agent --mode=bastion
printf 'sshkey\n' | octopus-agent --mode=bastion
printf 'sshconfig\n' | octopus-agent --mode=bastion
printf 'swap\n' | octopus-agent --mode=bastion
printf 'selinux\n' | octopus-agent --mode=bastion
printf 'docker\n' | octopus-agent --mode=bastion
printf 'dockercompose\n' | octopus-agent --mode=bastion
printf 'dockerconfig\n' | octopus-agent --mode=bastion

View File

@@ -1,6 +1,6 @@
#!/bin/bash
namespace=zjyd
namespace=jxyd
install_yq() {
wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/yq_linux_amd64 -O /usr/local/bin/yq
@@ -43,7 +43,7 @@ backup_all_stateful_sets() {
echo ""
}
# install_yq
install_yq
backup_all_deployment
backup_all_service
backup_all_stateful_sets

View File

@@ -0,0 +1,5 @@
docker run -d --name emqx -p 38085:18083 -p 31883:1883 -p 38083:8083 docker.107421.xyz/emqx/emqx:v4.0.0

View File

@@ -1,3 +1,7 @@
#!/bin/bash
# https://blog.csdn.net/weixin_43902588/article/details/142279993 参考文档
export QUAY=/var/lib/docker/quay
mkdir -p $QUAY/postgres
@@ -15,18 +19,30 @@ docker exec -it postgresql /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS pg
docker inspect -f "{{.NetworkSettings.IPAddress}}" postgresql
docker run -it --name redis \
# 不需要执行
docker run -it --rm --name redis \
-p 6379:6379 \
redis:6.2.14 \
redis-server --test-memory
redis-server --test-memory 4096
docker inspect -f "{{.NetworkSettings.IPAddress}}" redis
docker run --rm -it --name quay_config -p 8080:8080 quay.io/projectquay/quay:3.11.1 config secret
mkdir $QUAY/storage
mkdir $QUAY/config
docker run -d --name redis \
-p 6379:6379 \
-m 4g \
redis:6.2.14 \
--requirepass strongpassword
docker inspect -f "{{.NetworkSettings.IPAddress}}" redis
# 运行config服务 需要浏览器打开 http://172.31.2.7:8080/,并用 quayconfig/secret 登录。
docker run --rm -it --name quay_config -p 8080:8080 quay.io/projectquay/quay:3.11.1 config secret
setfacl -m u:1001:-wx $QUAY/storage
setfacl -m u:1001:-wx $QUAY/config
@@ -38,12 +54,12 @@ docker run -p 8033:8080 \
-d quay.io/projectquay/quay:3.11.1
docker run -d --name redis \
-p 6379:6379 \
-m 4g \
redis:6.2.14 \
--requirepass strongpassword
# 登录dashboard 8033 创建账户和密码 创建项目
admin
V2ryStr@ngPss
docker login --tls-verify=false quay:8033
V2ryStr@ngPss
docker login --tls-verify=false -u admin -p V2ryStr@ngPss 172.31.2.7:8033
docker login -u admin -p V2ryStr@ngPss 172.31.2.7:8033
docker pull 172.31.2.7:8033/admin/cmii

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env bash
name_space=jlyd
name_space=uavcloud-demo
delete_all_fronted_cmii_pod(){
@@ -26,4 +26,4 @@ delete_all_backend_cmii_pod(){
done
}
delete_all_fronted_cmii_pod
delete_all_backend_cmii_pod

View File

@@ -1,7 +1,7 @@
#!/bin/bash
harbor_host=10.40.51.5:8033
namespace=jsntejpt
harbor_host=10.20.1.130:8033
namespace=jxyd
app_name=""
new_tag=""

View File

@@ -1,9 +1,9 @@
#!/bin/bash
minio_local_path=/var/lib/docker/minio-pv/pv1
harbor_host=192.168.40.42:8033
inner_master_ip=192.168.40.42
minio_host_ip=192.168.40.193
harbor_host=172.31.2.7:8033/admin
inner_master_ip=172.31.2.7
minio_host_ip=172.31.2.9
install_minio(){
@@ -22,13 +22,13 @@ services:
ports:
- "9000:9000"
- "9001:9001"
image: '${harbor_host}/cmii/minio:2022.5.4'
image: '${harbor_host}/cmii/minio:2023.5.4'
environment:
- MINIO_ROOT_USER=cmii
- MINIO_ROOT_PASSWORD=B#923fC7mk
restart: always
volumes:
- ${minio_local_path}:/data
- ${minio_local_path}:/mnt/data
EOF
echo "start minio container !"
@@ -99,7 +99,7 @@ init_minio(){
}
install_docker_compose
# install_docker_compose
install_minio

View File

@@ -1,10 +1,8 @@
upstream proxy_server {
ip_hash;
server 192.168.40.42:30500;
server 192.168.40.183:30500;
server 192.168.40.130:30500;
server 192.168.40.131:30500;
server 192.168.40.174:30500;
server 172.31.2.7:30500;
server 172.31.2.8:30500;
server 172.31.2.9:30500;
}
server {
@@ -23,7 +21,7 @@ server {
proxy_buffering off;
proxy_buffer_size 4k;
proxy_buffers 4 12k;
proxy_set_header Host fake-domain.lnydyj.io;
proxy_set_header Host fake-domain.szgz.io;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";

View File

@@ -1,6 +1,10 @@
#!/bin/bash
mkdir /root/.kube
cp kube_config_cluster.yml /root/.kube/config
kubectl apply -f k8s-dashboard.yaml
kubectl delete -f k8s-dashboard.yaml
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
@@ -24,10 +28,10 @@ kubectl apply -f k8s-emqx.yaml
kubectl delete -f k8s-emqx.yaml
kubectl apply -f k8s-rabbitmq.yaml
kubectl delete -f k8s-rabbitmq.yam
kubectl delete -f k8s-rabbitmq.yaml
kubectl apply -f k8s-redis.yaml
kubectl delete -f k8s-redis.yamll
kubectl delete -f k8s-redis.yaml
kubectl apply -f k8s-mysql.yaml
kubectl delete -f k8s-mysql.yaml
@@ -42,12 +46,15 @@ kubectl delete -f k8s-nacos.yaml
kubectl apply -f k8s-configmap.yaml
kubectl delete -f k8s-configmap.yaml
vi k8s-ingress.yaml
kubectl apply -f k8s-ingress.yaml
kubectl delete -f k8s-ingress.yaml
vi k8s-frontend.yaml
kubectl apply -f k8s-frontend.yaml
kubectl delete -f k8s-frontend.yaml
vi k8s-backend.yaml
kubectl apply -f k8s-backend.yaml
kubectl delete -f k8s-backend.yaml

View File

@@ -2,11 +2,10 @@
kubectl get nodes --show-labels
kubectl label nodes 192.168.40.158 uavcloud.env=lnydyj --overwrite
kubectl label nodes 192.168.40.183 uavcloud.env=lnydyj --overwrite
kubectl label nodes 192.168.40.131 uavcloud.env=lnydyj --overwrite
kubectl label nodes 192.168.40.130 uavcloud.env=lnydyj --overwrite
kubectl label nodes 192.168.40.174 uavcloud.env=lnydyj --overwrite
kubectl label nodes 172.31.2.7 uavcloud.env=szgz --overwrite
kubectl label nodes 172.31.2.8 uavcloud.env=szgz --overwrite
kubectl label nodes 172.31.2.9 uavcloud.env=szgz --overwrite
# 删除label
kubectl label nodes 192.168.40.193 uavcloud.env-

View File

@@ -53,6 +53,12 @@ clean_rke_cluster() {
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -t raw -F
ip6tables -F && ip6tables -t nat -F && ip6tables -t mangle -F && ip6tables -t raw -F
rke remove --force
# Fix: `printf "y/n"` sent the literal string "y/n" to the [y/N] prompt, which
# docker treats as "no" — the prune never ran. Send an actual "y" + newline.
printf 'y\n' | docker container prune
rke -d up
}
clean_rke_cluster

View File

@@ -0,0 +1,154 @@
#!/usr/bin/env bash
### 需要修改以下的内容 ###
#### 需要修改以下的内容 ###
#### 需要修改以下的内容 ###
cmlc_app_image_list="szga-0711.txt" # 需要修改版本
rancher_image_list="kubernetes-images-2.5.7-1.20.4.txt" # 一般不需要修改
middleware_image_list="middleware-images.txt" # 一般不需要修改
#DockerRegisterDomain="20.47.129.116:8033" # 需要根据实际修改
DockerRegisterDomain="172.31.2.7:8033/admin" # 需要根据实际修改
HarborAdminPass=V2ryStr@ngPss # 需要跟第一脚本中的密码保持一致
#### 需要修改以上的内容 ###
#### 需要修改以上的内容 ###
#### 需要修改以上的内容 ###
#######################################
# Pull every image listed in the given list file(s), one file per argument.
# Images that fail to pull but already exist locally are accepted silently.
# Arguments: $1..$n - paths to text files, one image reference per line
# Outputs:   progress messages to stdout
#######################################
downloadAllNeededImages() {
  # Original used `[[ $# > 0 ]]`, which is a *lexicographic* string comparison
  # (ShellCheck SC2071); arithmetic context is the correct form.
  while (( $# > 0 )); do
    pulled=""
    # `|| [[ -n ... ]]` keeps a final line that lacks a trailing newline,
    # which the original loop silently dropped.
    while IFS= read -r image || [[ -n "${image}" ]]; do
      [ -z "${image}" ] && continue
      echo "开始下载:${image}"
      if docker pull --platform linux/arm64 "${image}" >/dev/null 2>&1; then
        echo "Image pull success: ${image}"
        # Sanity check: confirm the pulled image really is the arm64 variant.
        docker image inspect "${image}" | grep Architecture
        pulled="${pulled} ${image}"
      else
        # Pull failed; accept the image if it is already present locally.
        if docker inspect "${image}" >/dev/null 2>&1; then
          pulled="${pulled} ${image}"
        else
          echo "Image pull failed: ${image}"
        fi
      fi
      echo "-------------------------------------------------"
    done <"${1}"
    shift
  done
}
#######################################
# Pull every image listed in the given list file(s) and archive each file's
# successfully-pulled images into <listname>.tar.gz via `docker save | gzip`.
# Arguments: $1..$n - paths to text files, one image reference per line
# Outputs:   progress messages to stdout; writes one .tar.gz per list file
#######################################
downloadAllNeededImagesAndCompress() {
  # `(( $# > 0 ))` replaces the original `[[ $# > 0 ]]`, which compared
  # strings lexicographically (ShellCheck SC2071).
  while (( $# > 0 )); do
    pulled=""
    # `|| [[ -n ... ]]` keeps a final line without a trailing newline.
    while IFS= read -r image || [[ -n "${image}" ]]; do
      [ -z "${image}" ] && continue
      echo "开始下载:${image}"
      if docker pull --platform linux/arm64 "${image}" >/dev/null 2>&1; then
        echo "Image pull success: ${image}"
        # Sanity check: confirm the pulled image really is the arm64 variant.
        docker image inspect "${image}" | grep Architecture
        pulled="${pulled} ${image}"
      else
        if docker inspect "${image}" >/dev/null 2>&1; then
          pulled="${pulled} ${image}"
        else
          echo "Image pull failed: ${image}"
        fi
      fi
      echo "-------------------------------------------------"
    done <"${1}"
    # "${1%%.*}" strips the extension without forking `echo | cut`.
    compressPacName="${1%%.*}.tar.gz"
    echo "Creating ${compressPacName} with $(echo ${pulled} | wc -w | tr -d '[:space:]') images"
    # shellcheck disable=SC2086 -- intentional word splitting: pulled is a
    # space-separated list of image references (no spaces inside a reference).
    docker save ${pulled} | gzip --stdout > "${compressPacName}"
    shift
  done
  echo "已经完成打包工作!"
}
#######################################
# Re-tag and push every RKE/rancher image from ${rancher_image_list} to the
# private registry ${DockerRegisterDomain}.
# Globals:   rancher_image_list (read), DockerRegisterDomain (read),
#            HarborAdminPass (read)
# Outputs:   progress messages to stdout
#######################################
pushRKEImageToHarbor() {
  local linux_images=()
  # `|| [[ -n ... ]]` keeps a final line that lacks a trailing newline.
  while IFS= read -r image || [[ -n "${image}" ]]; do
    [ -z "${image}" ] && continue
    linux_images+=("${image}")
  done < "${rancher_image_list}"
  # --password-stdin keeps the password out of `ps` output and shell history
  # (original passed it via -p on the command line).
  printf '%s' "${HarborAdminPass}" | docker login -u admin --password-stdin "${DockerRegisterDomain}"
  local src image_name
  for src in "${linux_images[@]}"; do
    [ -z "${src}" ] && continue
    case ${src} in
      */*)
        # Already namespaced (e.g. rancher/foo) — keep the path as-is.
        image_name="${DockerRegisterDomain}/${src}"
        ;;
      *)
        # Bare image name — file it under the rancher/ project.
        image_name="${DockerRegisterDomain}/rancher/${src}"
        ;;
    esac
    echo "开始镜像至私有仓库推送:${image_name}"
    docker tag "${src}" "${image_name}"
    docker push "${image_name}"
    echo "-------------------------------------------------"
  done
}
#######################################
# Re-tag and push every application image from ${cmlc_app_image_list} to the
# private registry ${DockerRegisterDomain}, dropping the source registry host.
# Globals:   cmlc_app_image_list (read), DockerRegisterDomain (read),
#            HarborAdminPass (read)
# Outputs:   progress messages to stdout
#######################################
pushCMLCAPPImageToHarbor() {
  local app_images=()
  # `|| [[ -n ... ]]` keeps a final line that lacks a trailing newline.
  while IFS= read -r app || [[ -n "${app}" ]]; do
    [ -z "${app}" ] && continue
    app_images+=("${app}")
  done < "${cmlc_app_image_list}"
  # --password-stdin keeps the password out of `ps` output (was -p on argv).
  printf '%s' "${HarborAdminPass}" | docker login -u admin --password-stdin "${DockerRegisterDomain}"
  local app image_name
  for app in "${app_images[@]}"; do
    [ -z "${app}" ] && continue
    # "${app#*/}" strips the leading registry host component — same result as
    # the original `echo | cut -d/ -f2-8`, without two forks per image and
    # without the silent 8-segment cap.
    image_name="${DockerRegisterDomain}/${app#*/}"
    echo "开始镜像至私有仓库推送:${image_name}"
    docker tag "${app}" "${image_name}"
    docker push "${image_name}"
    echo "-------------------------------------------------"
  done
}
#######################################
# Re-tag and push every middleware image from ${middleware_image_list} into
# the cmii/ project of the private registry ${DockerRegisterDomain}.
# Globals:   middleware_image_list (read), DockerRegisterDomain (read),
#            HarborAdminPass (read)
# Outputs:   progress messages to stdout
#######################################
pushMiddlewareImageToHarbor() {
  local middleware_image=()
  # `|| [[ -n ... ]]` keeps a final line that lacks a trailing newline.
  while IFS= read -r image || [[ -n "${image}" ]]; do
    [ -z "${image}" ] && continue
    middleware_image+=("${image}")
  done < "${middleware_image_list}"
  # --password-stdin keeps the password out of `ps` output (was -p on argv).
  printf '%s' "${HarborAdminPass}" | docker login -u admin --password-stdin "${DockerRegisterDomain}"
  local app rest image_name
  for app in "${middleware_image[@]}"; do
    [ -z "${app}" ] && continue
    case ${app} in
      */*/*)
        # registry/org/name... — drop the first two path components.
        rest=${app#*/}
        image_name="${DockerRegisterDomain}/cmii/${rest#*/}"
        ;;
      */*)
        # org/name — drop the org component.
        image_name="${DockerRegisterDomain}/cmii/${app#*/}"
        ;;
      *)
        # BUGFIX: bare names (mongo:5.0, nginx:1.24.0, busybox:latest appear in
        # the middleware list) matched no branch in the original, so image_name
        # kept the previous iteration's value and the wrong tag was pushed.
        image_name="${DockerRegisterDomain}/cmii/${app}"
        ;;
    esac
    echo "开始镜像至私有仓库推送:${image_name}"
    docker tag "${app}" "${image_name}"
    docker push "${image_name}"
    echo "-------------------------------------------------"
  done
}
# downloadAllNeededImagesAndCompress "${middleware_image_list}"
#downloadAllNeededImages "${rancher_image_list}"
#pushRKEImageToHarbor
#pushCMLCAPPImageToHarbor
pushMiddlewareImageToHarbor

View File

@@ -23,6 +23,10 @@ downloadAllNeededImages() {
echo "开始下载:${i}"
if docker pull "${i}" >/dev/null 2>&1; then
echo "Image pull success: ${i}"
# 增加检查,镜像 的架构
docker image inspect ${i} | grep Architecture
pulled="${pulled} ${i}"
else
if docker inspect "${i}" >/dev/null 2>&1; then
@@ -45,6 +49,9 @@ downloadAllNeededImagesAndCompress() {
echo "开始下载:${i}"
if docker pull "${i}" >/dev/null 2>&1; then
echo "Image pull success: ${i}"
# 增加检查,镜像 的架构
docker image inspect ${i} | grep Architecture
pulled="${pulled} ${i}"
else
if docker inspect "${i}" >/dev/null 2>&1; then
@@ -139,7 +146,7 @@ pushMiddlewareImageToHarbor(){
}
downloadAllNeededImagesAndCompress "${cmlc_app_image_list}"
downloadAllNeededImagesAndCompress "${middleware_image_list}"
#downloadAllNeededImages "${rancher_image_list}"
#pushRKEImageToHarbor

View File

@@ -1,13 +1,13 @@
# https://dl.min.io/client/mc/release/windows-amd64/mc.exe
#mc.exe alias set uav-demo https://oss.demo.uavcmlc.com cmii B#923fC7mk
& "C:\Users\makn\Downloads\mc.exe" alias set uav-demo https://oss.demo.uavcmlc.com cmii B#923fC7mk
mc.exe ls uav-demo/cmlc-installation/6.1.1/ | ForEach-Object {
& "C:\Users\makn\Downloads\mc.exe" ls uav-demo/cmlc-installation/6.2.0-szgz-arm/ | ForEach-Object {
$item=($_.Split()[-1])
Write-Host "start to download $item "
mc.exe get uav-demo/cmlc-installation/6.1.1/$item "D:\CmiiDeployOffline\ZheJiangErJiPingTai\"
& "C:\Users\makn\Downloads\mc.exe" get uav-demo/cmlc-installation/6.2.0-szgz-arm/$item "D:\Desktop\cmii\cmii\"
Write-Host ""
}

View File

@@ -0,0 +1,63 @@
busybox
rancher/backup-restore-operator:v1.0.3
rancher/calico-cni:v3.17.2
rancher/calico-ctl:v3.17.2
rancher/calico-kube-controllers:v3.17.2
rancher/calico-pod2daemon-flexvol:v3.17.2
rancher/cis-operator:v1.0.3
rancher/cluster-proportional-autoscaler:1.7.1
rancher/coredns-coredns:1.8.0
rancher/coreos-etcd:v3.4.14-rancher1
rancher/coreos-kube-state-metrics:v1.9.7
rancher/coreos-prometheus-config-reloader:v0.39.0
rancher/coreos-prometheus-operator:v0.39.0
rancher/externalip-webhook:v0.1.6
rancher/flannel-cni:v0.3.0-rancher6
rancher/coreos-flannel:v0.13.0-rancher1
rancher/fleet-agent:v0.3.4
rancher/fleet:v0.3.4
rancher/fluentd:v0.1.24
rancher/grafana-grafana:7.1.5
rancher/hyperkube:v1.20.4-rancher1
rancher/jimmidyson-configmap-reload:v0.3.0
rancher/k8s-dns-dnsmasq-nanny:1.15.2
rancher/k8s-dns-kube-dns:1.15.2
rancher/k8s-dns-node-cache:1.15.13
rancher/k8s-dns-sidecar:1.15.2
rancher/klipper-lb:v0.1.2
rancher/kube-api-auth:v0.1.4
rancher/kubectl:v1.20.4
rancher/kubernetes-external-dns:v0.7.3
rancher/cluster-proportional-autoscaler:1.8.1
rancher/library-busybox:1.31.1
rancher/library-busybox:1.32.1
rancher/library-nginx:1.19.2-alpine
rancher/library-traefik:1.7.19
rancher/local-path-provisioner:v0.0.11
rancher/local-path-provisioner:v0.0.14
rancher/local-path-provisioner:v0.0.19
rancher/log-aggregator:v0.1.7
rancher/istio-kubectl:1.5.10
rancher/metrics-server:v0.4.1
rancher/configmap-reload:v0.3.0-rancher4
rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
rancher/nginx-ingress-controller:nginx-0.43.0-rancher1
rancher/opa-gatekeeper:v3.1.0-beta.7
rancher/openzipkin-zipkin:2.14.2
rancher/pause:3.2
rancher/plugins-docker:18.09
rancher/prom-alertmanager:v0.21.0
rancher/prom-node-exporter:v1.0.1
rancher/prom-prometheus:v2.18.2
rancher/prometheus-auth:v0.2.1
rancher/rancher-agent:v2.5.7
rancher/rancher-webhook:v0.1.0-beta9
rancher/rancher:v2.5.7
rancher/rke-tools:v0.1.72
rancher/security-scan:v0.1.14
rancher/security-scan:v0.2.2
rancher/shell:v0.1.6
rancher/sonobuoy-sonobuoy:v0.16.3
rancher/system-upgrade-controller:v0.6.2

View File

@@ -0,0 +1,19 @@
bitnami/redis:6.2.14-debian-11-r1
bitnami/mysql:8.1.0-debian-11-r42
simonrupf/chronyd:0.4.3
bitnami/bitnami-shell:11-debian-11-r136
bitnami/rabbitmq:3.11.26-debian-11-r2
ossrs/srs:v5.0.195
emqx/emqx:4.4.19
emqx/emqx:5.5.1
nacos/nacos-server:v2.1.2-slim
mongo:5.0
bitnami/minio:2023.5.4
kubernetesui/dashboard:v2.0.1
kubernetesui/metrics-scraper:v1.0.4
nginx:1.24.0
redis:6.0.20-alpine
dyrnq/nfs-subdir-external-provisioner:v4.0.2
jerrychina2020/rke-tools:v0.175-linux
jerrychina2020/rke-tools:v0.175
busybox:latest

View File

@@ -1,7 +1,7 @@
#!/bin/bash
harbor_host=10.40.51.5:8033
namespace=zygazww
harbor_host=172.31.2.7:8033/admin
namespace=szgz
app_name=""
new_tag=""