commit 437acbeb63
Author: zeaslity
Date:   2024-10-30 16:30:51 +08:00
3363 changed files with 653948 additions and 0 deletions

View File

@@ -0,0 +1,38 @@
#!/usr/bin/env bash
# Run on every node (adjust the hostname to match each node)
hostnamectl set-hostname master-node
sed -i "/search/ a nameserver 223.5.5.5" /etc/resolv.conf
echo "AllowTcpForwarding yes" >> /etc/ssh/sshd_config
systemctl restart sshd
cat >> /etc/hosts << EOF
10.129.80.218 master-node
10.129.80.217 worker-1
10.129.80.245 worker-2
10.129.80.222 worker-3
10.129.80.223 storage-1
EOF
ping -c 3 worker-1
yum clean all && yum makecache
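# Sketch (assumes root SSH access to the peers is already in place): push the
# same /etc/hosts entries to the remaining nodes instead of editing each one.
for h in 10.129.80.217 10.129.80.245 10.129.80.222 10.129.80.223; do
    scp /etc/hosts root@${h}:/etc/hosts
done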

View File

@@ -0,0 +1,95 @@
#!/bin/bash
# Disable swap
swapoff -a
cp -f /etc/fstab /etc/fstab_bak
grep -v swap /etc/fstab_bak >/etc/fstab
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeBefore=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "扩容之前的root目录的容量为${RootVolumeSizeBefore}"
# echo "y
# " | lvremove /dev/mapper/centos-swap
# freepesize=$(vgdisplay centos | grep 'Free PE' | awk '{print $5}')
# lvextend -l+${freepesize} /dev/mapper/centos-root
# # Grow the XFS filesystem to the maximum available size
# xfs_growfs /dev/mapper/centos-root
# df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}'
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeAfter=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "扩容之后的root目录的容量为${RootVolumeSizeAfter}"
# RootVolumeSizeBeforeNum=$(echo $RootVolumeSizeBefore | cut -d "G" -f1)
# RootVolumeSizeAfterNum=$(echo $RootVolumeSizeAfter | cut -d "G" -f1)
# echo "恭喜您的root目录容量增加了+++++++$(( ${RootVolumeSizeAfterNum}-${RootVolumeSizeBeforeNum} ))GB+++++"
#yum install lvm2 -y
echo ""
echo ""
echo ""
echo "-----------------------------------------------------------------------"
export VG_NAME=datavg
echo "n
p
t
8e
w
" | fdisk /dev/vdb
partprobe
# If a volume group already exists, extend it instead:
# vgextend /dev/mapper/centos /dev/vda3
vgcreate ${VG_NAME} /dev/vdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# Adjust the size to the actual environment
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
mkdir -p /var/lib/docker
#selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
export selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
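# Optional hardening (sketch, not in the original flow): reference the LV by
# UUID so the fstab entry survives device-path changes.
# uuid=$(blkid -s UUID -o value /dev/mapper/${VG_NAME}-lvdata)
# echo "UUID=${uuid} /var/lib/docker xfs defaults 0 0" >> /etc/fstab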
echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"
# Grow the root volume; identify the filesystem to grow (${VG_NAME}-root) via df -Th
# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
# xfs_growfs /dev/mapper/${VG_NAME}-root
# Custom variant: install lvm2 first
#echo "n
#p
#
#
#
#t
#
#8e
#w
#" | fdisk /dev/vdb
#partprobe
#vgextend klas_host-10-190-202-141 /dev/vda4
#lvextend -l +100%FREE /dev/mapper/klas_host--10--190--202--141-root
#partprobe
#xfs_growfs /dev/mapper/klas_host--10--190--202--141-root
#df -TH

View File

@@ -0,0 +1,170 @@
#!/bin/bash
all_image_list_txt="all-cmii-image-list.txt" # adjust for the target version
gzip_image_list_txt="all-gzip-image-list.txt" # usually unchanged
oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
local_gzip_path="/root/wdd/octopus_image"
DockerRegisterDomain="10.129.80.218:8033" # adjust to the actual registry
HarborAdminPass=V2ryStr@ngPss # must match the password in the first script
print_green() {
    echo -e "\033[32m${1}\033[0m"
    echo ""
}
print_red() {
    echo -e "\033[31m${1}\033[0m"
    echo ""
}
Download_Load_Tag_Upload() {
    print_green "[DLTU] - start !"
    while [[ $# -gt 0 ]]; do
        case "$1" in
        rke)
            # print_green "download rke "
            local_gzip_path="$local_gzip_path/rke"
            mkdir -p ${local_gzip_path}
            oss_prefix_url="$oss_prefix_url/rke/"
            dltu
            shift # past argument
            ;;
        middle)
            local_gzip_path="$local_gzip_path/middle"
            mkdir -p $local_gzip_path
            oss_prefix_url="$oss_prefix_url/middle/"
            dltu
            shift # past argument
            ;;
        cmii)
            local_gzip_path="$local_gzip_path/cmii"
            mkdir -p $local_gzip_path
            oss_prefix_url="$oss_prefix_url/zhhq_6.0.0/"
            dltu
            shift # past argument
            ;;
        *)
            # unknown option; shift so a bad argument cannot loop forever
            print_red "bad arguments"
            shift
            ;;
        esac
    done
}
dltu() {
    print_green "download all image name list and gzip file list!"
    cd $local_gzip_path || exit
    rm -f $all_image_list_txt
    rm -f $gzip_image_list_txt
    wget "$oss_prefix_url$all_image_list_txt"
    wget "$oss_prefix_url$gzip_image_list_txt"
    # docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
    echo ""
    while IFS= read -r i; do
        [ -z "${i}" ] && continue
        echo "download gzip file =>: $oss_prefix_url${i}"
        if wget "$oss_prefix_url${i}" >/dev/null 2>&1; then
            echo "Gzip file download success : ${i}"
            # image_full_name=$(docker load -i ${i} | head -n1 | awk -F': ' '{print $2}')
            #
            # app_name=$(echo "$image_full_name" | sed 's|.*/||g')
            # echo "extract short name is $app_name"
            # if echo $image_full_name | grep -q "rancher"; then
            #     print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
            #     docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
            #     docker push $DockerRegisterDomain/rancher/$app_name
            # else
            #     print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
            #     docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
            #     docker push $DockerRegisterDomain/cmii/$app_name
            # fi
        else
            print_red "Gzip file download FAILED : ${i}"
        fi
        echo "-------------------------------------------------"
    done <"${gzip_image_list_txt}"
}
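# Optional integrity check (sketch): verify each archive before loading it,
# since a truncated download would otherwise fail halfway through tagging.
# for f in ${local_gzip_path}/*.tar.gz; do gzip -t "$f" || print_red "corrupt: $f"; done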
Load_Tag_Upload() {
    print_green "[LTU] - start to load image from offline !"
    while [[ $# -gt 0 ]]; do
        case "$1" in
        rke)
            # print_green "download rke "
            local_gzip_path="$local_gzip_path/rke"
            mkdir -p ${local_gzip_path}
            oss_prefix_url="$oss_prefix_url/rke/"
            ltu
            shift # past argument
            ;;
        middle)
            local_gzip_path="$local_gzip_path/middle"
            mkdir -p $local_gzip_path
            oss_prefix_url="$oss_prefix_url/middle/"
            ltu
            shift # past argument
            ;;
        cmii)
            local_gzip_path="$local_gzip_path/cmii"
            mkdir -p $local_gzip_path
            oss_prefix_url="$oss_prefix_url/ehejpt/"
            ltu
            shift # past argument
            ;;
        *)
            # unknown option; shift so a bad argument cannot loop forever
            print_red "bad arguments"
            shift
            ;;
        esac
    done
}
ltu() {
    all_file_list=$(find $local_gzip_path -type f -name "*.tar.gz")
    for file in $all_file_list; do
        echo "offline gzip file is => : $file"
        image_full_name=$(docker load -i ${file} | head -n1 | awk -F': ' '{print $2}')
        app_name=$(echo "$image_full_name" | sed 's|.*/||g')
        echo "extract short name is $app_name"
        docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
        if echo $image_full_name | grep -q "rancher"; then
            print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
            docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
            docker push $DockerRegisterDomain/rancher/$app_name
        else
            print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
            docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
            docker push $DockerRegisterDomain/cmii/$app_name
        fi
    done
}
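# Note (sketch): a single docker login before the loop would suffice; logging
# in per image repeats the same credential exchange on every iteration.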
test_short_name() {
    app_name=$(echo "nginx:latest" | sed 's|.*/||g')
    echo "extract short name is $app_name"
}
# test_short_name
#Download_Load_Tag_Upload "rke" "middle"
Load_Tag_Upload "cmii"
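# Usage (sketch): pick the mode that matches the environment.
#   Download_Load_Tag_Upload "rke" "middle" "cmii"  # online: fetch from OSS, then load/tag/push
#   Load_Tag_Upload "cmii"                          # offline: load local archives, tag, push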

View File

@@ -0,0 +1,228 @@
nodes:
  - address: 10.129.80.218
    user: root
    role:
      - controlplane
      - etcd
      - worker
    internal_address: 10.129.80.218
    labels:
      ingress-deploy: true
  - address: 10.129.80.217
    user: root
    role:
      - worker
    internal_address: 10.129.80.217
    labels:
      mysql-deploy: true
  - address: 10.129.80.245
    user: root
    role:
      - worker
    internal_address: 10.129.80.245
    labels:
      uavcloud.env: zhhq
      ingress-deploy: true
  - address: 10.129.80.222
    user: root
    role:
      - worker
    internal_address: 10.129.80.222
    labels:
      uavcloud.env: zhhq
      ingress-deploy: true
  - address: 10.129.80.223
    user: root
    role:
      - worker
    internal_address: 10.129.80.223
    labels:
      uavcloud.env: zhhq
      ingress-deploy: true
authentication:
  strategy: x509
  sans:
    - "10.129.80.218"
private_registries:
  - url: 10.129.80.218:8033 # private registry address
    user: admin
    password: "V2ryStr@ngPss"
    is_default: true
##############################################################################
# Defaults to false; if set to true, RKE will not error out on unsupported Docker versions
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
ssh_key_path: /root/.ssh/id_ed25519
#ssh_key_path: /root/.ssh/id_rsa
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
  etcd:
    backup_config:
      enabled: false
      interval_hours: 72
      retention: 3
      safe_timestamp: false
      timeout: 300
    creation: 12h
    extra_args:
      election-timeout: 5000
      heartbeat-interval: 500
    gid: 0
    retention: 72h
    snapshot: false
    uid: 0
  kube-api:
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-controller
    service_cluster_ip_range: 172.24.0.0/16
    # Expose a different port range for NodePort services
    service_node_port_range: 30000-40000
    always_pull_images: true
    pod_security_policy: false
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Enable audit log to stdout
      audit-log-path: "-"
      # Increase number of delete workers
      delete-collection-workers: 3
      # Set the level of log output to warning-level
      v: 1
  kube-controller:
    # CIDR pool used to assign IP addresses to pods in the cluster
    cluster_cidr: 172.28.0.0/16
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-api
    service_cluster_ip_range: 172.24.0.0/16
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Set the level of log output to debug-level
      v: 1
      # Enable RotateKubeletServerCertificate feature gate
      feature-gates: RotateKubeletServerCertificate=true
      # Enable TLS Certificates management
      # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
      cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
      cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
  kubelet:
    # Base domain for the cluster
    cluster_domain: cluster.local
    # IP address for the DNS service endpoint
    cluster_dns_server: 172.24.0.10
    # Fail if swap is on
    fail_swap_on: false
    # Optionally define additional volume binds to a service
    extra_binds:
      - "/data/minio-pv:/hostStorage" # do not modify; provides the PV for MinIO
    extra_args:
      # Set max pods to 122 instead of the default 110
      max-pods: 122
  scheduler:
    extra_args:
      # Set the level of log output to warning-level
      v: 0
  kubeproxy:
    extra_args:
      # Set the level of log output to warning-level
      v: 1
authorization:
  mode: rbac
addon_job_timeout: 30
# Specify network plug-in (canal, calico, flannel, weave, or none)
network:
  options:
    flannel_backend_type: vxlan
    flannel_iface: ens34
    flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
    flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
  plugin: calico
# Specify DNS provider (coredns or kube-dns)
dns:
  provider: coredns
  nodelocal: {}
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 20%
      maxSurge: 15%
  linear_autoscaler_params:
    cores_per_replica: 0.34
    nodes_per_replica: 4
    prevent_single_point_failure: true
    min: 2
    max: 3
# Specify monitoring provider (metrics-server)
monitoring:
  provider: metrics-server
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 8
ingress:
  provider: nginx
  default_backend: true
  http_port: 0
  https_port: 0
  extra_envs:
    - name: TZ
      value: Asia/Shanghai
  node_selector:
    ingress-deploy: true
  options:
    use-forwarded-headers: "true"
    access-log-path: /var/log/nginx/access.log
    client-body-timeout: '6000'
    compute-full-forwarded-for: 'true'
    enable-underscores-in-headers: 'true'
    log-format-escape-json: 'true'
    log-format-upstream: >-
      { "msec": "$msec", "connection": "$connection", "connection_requests":
      "$connection_requests", "pid": "$pid", "request_id": "$request_id",
      "request_length": "$request_length", "remote_addr": "$remote_addr",
      "remote_user": "$remote_user", "remote_port": "$remote_port",
      "http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
      "$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
      "request_uri": "$request_uri", "args": "$args", "status": "$status",
      "body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
      "http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
      "http_host": "$http_host", "server_name": "$server_name", "request_time":
      "$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
      "$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
      "upstream_response_time": "$upstream_response_time",
      "upstream_response_length": "$upstream_response_length",
      "upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
      "$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
      "request_method": "$request_method", "server_protocol": "$server_protocol",
      "pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
      "geoip_country_code": "$geoip_country_code" }
    proxy-body-size: 5120m
    proxy-read-timeout: '6000'
    proxy-send-timeout: '6000'
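# Sketch (not part of the original file): with Docker and the SSH key in place
# on every node, the cluster is created from this config with:
#   rke up --config cluster.yml
# The resulting kubeconfig is written to kube_config_cluster.yml.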

View File

@@ -0,0 +1,111 @@
#!/bin/bash
minio_local_path=/var/lib/docker/minio-pv/pv1
harbor_host=10.129.80.218:8033
inner_master_ip=10.129.80.218
minio_host_ip=10.129.80.223
install_minio(){
    echo "start to create minio local path !"
    mkdir -p ${minio_local_path}
    chmod -R 777 ${minio_local_path}
    mkdir -p /root/wdd/install/
    cat > /root/wdd/install/minio-docker-compose.yaml <<EOF
version: '2'
services:
  minio1:
    ports:
      - "9000:9000"
      - "9001:9001"
    image: '${harbor_host}/cmii/minio:2022.5.4'
    environment:
      - MINIO_ROOT_USER=cmii
      - MINIO_ROOT_PASSWORD=B#923fC7mk
    restart: always
    volumes:
      - ${minio_local_path}:/data
EOF
    echo "start minio container !"
    docker-compose -f /root/wdd/install/minio-docker-compose.yaml up -d
    echo ""
}
install_docker_compose(){
    curl https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/docker-compose-amd64 -o /usr/local/bin/docker-compose
    chmod +x /usr/local/bin/docker-compose
}
init_minio(){
    echo "start to download mc!"
    if [[ ! -f /usr/local/bin/mc ]]; then
        curl https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/mc -o /usr/local/bin/mc
        chmod +x /usr/local/bin/mc
    fi
    # curl https://dl.min.io/client/mc/release/linux-amd64/mc -o /usr/local/bin/mc
    echo ""
    sleep 5
    export tenant_name=outside
    mc alias set ${tenant_name} http://${minio_host_ip}:9000 cmii B#923fC7mk
    mc mb ${tenant_name}/jadenq ${tenant_name}/tus ${tenant_name}/thumbnail ${tenant_name}/pub-cms ${tenant_name}/live-srs-hls/ ${tenant_name}/mission/ ${tenant_name}/surveillance ${tenant_name}/playback ${tenant_name}/tower ${tenant_name}/modelprocess ${tenant_name}/srs-hls ${tenant_name}/live-cluster-hls ${tenant_name}/geodata
    echo ""
    echo "set rabbit mq"
    mc admin config set ${tenant_name} notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@${inner_master_ip}:35672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"
    echo ""
    echo "sleep 5 s!"
    sleep 5
    mc admin service restart ${tenant_name}
    echo "sleep 5 s!"
    sleep 5
    echo ""
    echo "start to add event notification !"
    mc event add ${tenant_name}/mission arn:minio:sqs::1:amqp --event put
    mc event add ${tenant_name}/modelprocess arn:minio:sqs::1:amqp --event put
    mc event add ${tenant_name}/live-srs-hls arn:minio:sqs::1:amqp --event put
    mc event add ${tenant_name}/playback arn:minio:sqs::1:amqp --event put
    mc event add ${tenant_name}/live-cluster-hls arn:minio:sqs::1:amqp --event put
    mc event add ${tenant_name}/geodata arn:minio:sqs::1:amqp --event put
    mc event add ${tenant_name}/surveillance arn:minio:sqs::1:amqp --event put
    mc event add ${tenant_name}/tus arn:minio:sqs::1:amqp --event delete
    mc ilm add --expiry-days "1" ${tenant_name}/tus
    echo ""
    echo "done of init !"
}
#install_docker_compose
install_minio
if [[ "$(docker inspect -f '{{.State.Running}}' install-minio1-1)" == "true" ]]; then
    echo "minio is running now! start to init minio!"
    init_minio
fi
#init_minio
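# Quick sanity check (sketch): confirm the buckets and event hooks registered.
# mc ls outside
# mc event list outside/mission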

View File

@@ -0,0 +1,14 @@
version: '3'
services:
  cmii-nginx:
    image: 10.129.80.218:8033/cmii/nginx:1.21.3
    volumes:
      - /etc/nginx/conf.d:/etc/nginx/conf.d
      - /etc/nginx/nginx.conf:/etc/nginx/nginx.conf
      - /root/offline_map:/root/offline_map
      - /root/offline:/root/offline
    ports:
      - "8088:8088"
      - "8089:8089"
    restart: always
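# Sketch (the compose file name below is assumed): start the proxy with
#   docker-compose -f cmii-nginx-compose.yaml up -d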

View File

@@ -0,0 +1,49 @@
upstream ccc {
    ip_hash;
    server 10.129.80.222:30500;
    server 10.129.80.223:30500;
}
server {
    listen 8088;
    server_name localhost;
    location / {
        proxy_pass http://ccc;
        client_max_body_size 5120m;
        client_body_buffer_size 5120m;
        client_body_timeout 6000s;
        proxy_send_timeout 10000s;
        proxy_read_timeout 10000s;
        proxy_connect_timeout 600s;
        proxy_max_temp_file_size 5120m;
        proxy_request_buffering on;
        proxy_buffering off;
        proxy_buffer_size 4k;
        proxy_buffers 4 12k;
        proxy_set_header Host fake-domain.zhhq.io;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
    location /_AMapService/v4/map/styles {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://webapi.amap.com/v4/map/styles;
    }
    location /_AMapService/ {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://restapi.amap.com/;
    }
    location /rtc/v1/ {
        add_header Access-Control-Allow-Headers X-Requested-With;
        add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
        proxy_pass http://127.0.0.1:30985/rtc/v1/;
    }
    location ~ ^/\w*/actuator/ {
        return 403;
    }
}
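# Quick check (sketch): the proxy injects Host: fake-domain.zhhq.io toward the
# upstream, so a plain request should reach the cluster ingress:
#   curl -o /dev/null -w '%{http_code}\n' http://127.0.0.1:8088/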

View File

@@ -0,0 +1,20 @@
server {
    listen 8089;
    server_name localhost;
    # Origins allowed for cross-domain requests; * means all
    add_header 'Access-Control-Allow-Origin' *;
    # Allow requests to carry cookies
    add_header 'Access-Control-Allow-Credentials' 'true';
    # Allowed request methods, e.g. GET/POST/PUT/DELETE
    add_header 'Access-Control-Allow-Methods' *;
    # Allowed request headers
    add_header 'Access-Control-Allow-Headers' *;
    location / {
        root /root/offline/;
        autoindex on;
        add_header Access-Control-Allow-Origin *;
        add_header Access-Control-Allow-Methods 'GET,POST';
        add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
    }
}
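# Sketch: verify the CORS headers on the autoindex listing:
#   curl -I http://127.0.0.1:8089/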

View File

@@ -0,0 +1,37 @@
#!/bin/bash
# earlier host sets, kept for reference; only the last assignment takes effect
# host_list=(10.129.80.217)
# host_list=(10.129.80.218 10.129.80.245 10.129.80.222 10.129.80.223)
host_list=(10.129.80.217 10.129.80.245 10.129.80.222 10.129.80.223)
for ip in "${host_list[@]}"; do
    echo "current ip is $ip"
    ssh root@${ip} "curl 172.24.152.72"
done
# scratch notes: disk, 10.129.80.245
# ssh root@${ip} "mkdir /root/wdd"
# scp /root/wdd/octopus-agent_linux_amd64_2024-09-23-17-08-44 root@${ip}:/root/wdd/
# scp /root/wdd/docker-amd64-20.10.15.tgz root@${ip}:/root/wdd/
# scp /root/wdd/nfs_client_22.04.4_amd64.tar.gz root@${ip}:/root/wdd/
# scp /root/wdd/nfs_server_22.04.4_amd64.tar.gz root@${ip}:/root/wdd/
# scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${ip}:/root/wdd/
# ssh root@${ip} "chmod +x /root/wdd/octopus-agent_linux_amd64_2024-09-23-17-08-44"
# ssh root@${ip} "printf 'firewall\n' | /root/wdd/octopus-agent_linux_amd64_2024-09-23-17-08-44 --mode=bastion"
# ssh root@${ip} "printf 'sysconfig\n' | /root/wdd/octopus-agent_linux_amd64_2024-09-23-17-08-44 --mode=bastion"
# ssh root@${ip} "printf 'swap\n' | /root/wdd/octopus-agent_linux_amd64_2024-09-23-17-08-44 --mode=bastion"
# ssh root@${ip} "printf 'selinux\n' | /root/wdd/octopus-agent_linux_amd64_2024-09-23-17-08-44 --mode=bastion"
# ssh root@${ip} "printf 'docker\n' | /root/wdd/octopus-agent_linux_amd64_2024-09-23-17-08-44 --mode=bastion"
# ssh root@${ip} "printf 'dockercompose\n' | /root/wdd/octopus-agent_linux_amd64_2024-09-23-17-08-44 --mode=bastion"
for ip in "${host_list[@]}"; do
    scp /etc/docker/daemon.json root@${ip}:/etc/docker/
    ssh root@${ip} "systemctl restart docker && sleep 3 && docker info"
done

View File

@@ -0,0 +1,45 @@
#!/bin/bash
# Back up the database tables
export mysql_exec_file_prefix=/root/wdd/mysql-8.0.27-linux-glibc2.17-x86_64-minimal/bin
export mysql_port=33306
export SQL_DUMP_FILE=/root/wdd/all_tables_5.4.0.sql
export SQL_DDL_FILE="/root/wdd/mysql/"
backup_all_structure() {
    echo ""
    echo ""
    echo "start to backup all-table-structure!"
    $mysql_exec_file_prefix/mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P$mysql_port -e 'show databases;' | grep -Ev 'Database|information_schema|mysql|sys|performance_schema' | xargs $mysql_exec_file_prefix/mysqldump -uroot -pQzfXQhd3bQ -h127.0.0.1 -P$mysql_port --single-transaction --source-data=2 --hex-blob --triggers --routines --events --no-data --set-gtid-purged=OFF --databases >"${SQL_DUMP_FILE}"
    echo ""
}
backup_all_dump() {
    echo ""
    echo ""
    echo ""
    echo "start to backup all-table-database-data!"
    export SQL_FULL_BACK_UP_FILE=/root/wdd/all_tables_5.4.0_20240904_fullback.sql
    $mysql_exec_file_prefix/mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P$mysql_port -e 'show databases;' | grep -Ev 'Database|information_schema|mysql|sys|performance_schema' | xargs $mysql_exec_file_prefix/mysqldump -uroot -pQzfXQhd3bQ -h127.0.0.1 -P$mysql_port --single-transaction --source-data=2 --hex-blob --triggers --routines --events --set-gtid-purged=OFF --databases >"${SQL_FULL_BACK_UP_FILE}"
    echo ""
}
import_ddl_sql(){
    echo ""
    echo ""
    echo "start to import ddl sql !"
    for sql_file in $(ls "$SQL_DDL_FILE" | sort -n -k1.1,1.2); do
        echo "current file is ${sql_file}"
        sudo $mysql_exec_file_prefix/mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P$mysql_port <"$SQL_DDL_FILE/${sql_file}"
        echo "------------------"
        echo ""
    done
    echo ""
}
import_ddl_sql
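# Usage (sketch): only the DDL import runs by default; the dumps are opt-in.
# backup_all_structure  # schema-only dump to ${SQL_DUMP_FILE}
# backup_all_dump       # full dump including data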