Bulk updates
40
53-202501-江西扩容/1-磁盘.sh
Normal file
@@ -0,0 +1,40 @@
#! /bin/bash

echo ""
echo ""
echo ""
echo "-----------------------------------------------------------------------"

export VG_NAME=datavg

# Scripted fdisk input: new primary partition (accept the defaults for
# partition number and first/last sector), set the type to 8e (Linux LVM),
# then write the partition table.
echo "n
p



t

8e
w
" | fdisk /dev/vdb
partprobe
# If the volume group already exists, extend it directly instead:
# vgextend rootvg /dev/vdb
vgcreate ${VG_NAME} /dev/vdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# Adjust the size to the actual environment
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
# mkfs.ext4 /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
mkdir -p /var/lib/docker
#selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
export selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a

echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"
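A quick way to verify the layout after this script has run (a minimal sketch; the device, volume group and mount point follow the values used above):

lsblk /dev/vdb                 # the new vdb1 partition should appear
vgs datavg                     # volume group created by the script
lvs datavg                     # lvdata logical volume
df -Th /var/lib/docker         # xfs mount picked up from /etc/fstab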
168
53-202501-江西扩容/ImageSyncDLTU.sh
Normal file
@@ -0,0 +1,168 @@
#!/bin/bash

all_image_list_txt="all-cmii-image-list.txt" # change to match the release version
gzip_image_list_txt="all-gzip-image-list.txt" # usually no need to change
oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
local_gzip_path="/root/octopus-image"

DockerRegisterDomain="10.20.1.130:8033" # change according to the actual environment
HarborAdminPass=V2ryStr@ngPss # must stay consistent with the password in the first script

print_green() {
    echo -e "\033[32m${1}\033[0m"
    echo ""
}

print_red() {
    echo -e "\033[31m${1}\033[0m"
    echo ""
}

Download_Load_Tag_Upload() {
    print_green "[DLTU] - start !"
    while [[ $# -gt 0 ]]; do
        case "$1" in
        rke)
            # print_green "download rke "
            local_gzip_path="$local_gzip_path/rke"
            mkdir -p ${local_gzip_path}
            oss_prefix_url="$oss_prefix_url/rke/"
            dltu
            shift # past argument
            ;;
        middle)
            local_gzip_path="$local_gzip_path/middle"
            mkdir -p $local_gzip_path
            oss_prefix_url="$oss_prefix_url/middle/"
            dltu
            shift # past argument
            ;;
        cmii)
            local_gzip_path="$local_gzip_path/cmii"
            mkdir -p $local_gzip_path
            oss_prefix_url="$oss_prefix_url/6.2.0-demo/"
            dltu
            shift # past argument
            ;;
        *)
            # unknown option
            print_red "bad arguments"
            shift # skip the unrecognized argument to avoid looping forever
            ;;
        esac
    done

}

dltu() {
    print_green "download all image name list and gzip file list!"
    cd $local_gzip_path || exit

    rm $all_image_list_txt
    rm $gzip_image_list_txt

    wget "$oss_prefix_url$all_image_list_txt"
    wget "$oss_prefix_url$gzip_image_list_txt"

    docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
    echo ""
    while IFS= read -r i; do
        [ -z "${i}" ] && continue
        echo "download gzip file =>: $oss_prefix_url${i}"
        if wget "$oss_prefix_url${i}" >/dev/null 2>&1; then
            echo "Gzip file download success : ${i}"
            image_full_name=$(docker load -i ${i} | head -n1 | awk -F': ' '{print $2}')

            app_name=$(echo "$image_full_name" | sed 's|.*/||g')
            echo "extract short name is $app_name"

            if echo $image_full_name | grep -q "rancher"; then
                print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
                docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
                docker push $DockerRegisterDomain/rancher/$app_name
            else
                print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
                docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
                docker push $DockerRegisterDomain/cmii/$app_name
            fi

        else
            print_red "Gzip file download FAILED : ${i}"
        fi
        echo "-------------------------------------------------"
    done <"${gzip_image_list_txt}"
    shift

}

Load_Tag_Upload(){
    print_green "[LTU] - start to load image from offline !"

    while [[ $# -gt 0 ]]; do
        case "$1" in
        rke)
            # print_green "download rke "
            local_gzip_path="$local_gzip_path/rke"
            mkdir -p ${local_gzip_path}
            oss_prefix_url="$oss_prefix_url/rke/"
            ltu
            shift # past argument
            ;;
        middle)
            local_gzip_path="$local_gzip_path/middle"
            mkdir -p $local_gzip_path
            oss_prefix_url="$oss_prefix_url/middle/"
            ltu
            shift # past argument
            ;;
        cmii)
            local_gzip_path="$local_gzip_path/6.1.1"
            mkdir -p $local_gzip_path
            oss_prefix_url="$oss_prefix_url/6.1.1/"
            ltu
            shift # past argument
            ;;
        *)
            # unknown option
            print_red "bad arguments"
            shift # skip the unrecognized argument to avoid looping forever
            ;;
        esac
    done

}

ltu(){
    all_file_list=$(find $local_gzip_path -type f -name "*.tar.gz")

    for file in $all_file_list; do
        echo "offline gzip file is => : $file"
        image_full_name=$(docker load -i ${file} | head -n1 | awk -F': ' '{print $2}')

        docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}

        app_name=$(echo "$image_full_name" | sed 's|.*/||g')
        echo "extract short name is $app_name"

        if echo $image_full_name | grep -q "rancher"; then
            print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
            docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
            docker push $DockerRegisterDomain/rancher/$app_name
        else
            print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
            docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
            docker push $DockerRegisterDomain/cmii/$app_name
        fi
    done
}


test(){
    app_name=$(echo "nginx:latest" | sed 's|.*/||g')
    echo "extract short name is $app_name"
}

# test
Download_Load_Tag_Upload "cmii"

# Load_Tag_Upload "cmii"
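For a single archive, the tag-and-push sequence performed inside dltu/ltu is equivalent to the manual steps below (a sketch; cmii-backend.tar.gz is a hypothetical archive name, and the registry address and password are the values configured at the top of the script):

docker login -u admin -p V2ryStr@ngPss 10.20.1.130:8033
image_full_name=$(docker load -i cmii-backend.tar.gz | head -n1 | awk -F': ' '{print $2}')
app_name=$(echo "$image_full_name" | sed 's|.*/||g')    # strip the registry/namespace prefix
docker tag "$image_full_name" 10.20.1.130:8033/cmii/"$app_name"
docker push 10.20.1.130:8033/cmii/"$app_name"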
6288
53-202501-江西扩容/all-deployment-jxyd.yaml
Normal file
File diff suppressed because it is too large
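The manifest itself is not shown here; as a plain Kubernetes deployment bundle it would presumably be applied with kubectl against the RKE cluster defined in cluster.yml below, for example (a sketch, assuming the kubeconfig generated by rke up):

kubectl --kubeconfig kube_config_cluster.yml apply -f 53-202501-江西扩容/all-deployment-jxyd.yaml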
209
53-202501-江西扩容/cluster.yml
Normal file
@@ -0,0 +1,209 @@
nodes:
  - address: 10.20.1.130
    user: root
    role:
      - controlplane
      - etcd
      - worker
    internal_address: 10.20.1.130
    labels:
      ingress-deploy: true
  - address: 10.20.1.133
    user: root
    role:
      - worker
    internal_address: 10.20.1.133
  - address: 10.20.1.134
    user: root
    role:
      - worker
    internal_address: 10.20.1.134
    labels:
      mysql-deploy: true
  - address: 10.20.1.141
    user: root
    role:
      - worker
    internal_address: 10.20.1.141
    labels:
      ingress-deploy: true
  - address: 10.20.1.142
    user: root
    role:
      - worker
    internal_address: 10.20.1.142
    labels:
      ingress-deploy: true
  - address: 10.20.1.144
    user: root
    role:
      - worker
    internal_address: 10.20.1.144
    labels:
      ingress-deploy: true
  - address: 10.20.1.145
    user: root
    role:
      - worker
    internal_address: 10.20.1.145
    labels:
      ingress-deploy: true


authentication:
  strategy: x509
  sans:
    - "10.20.1.130"

private_registries:
  - url: 10.20.1.130:8033 # private registry address
    user: admin
    password: "V2ryStr@ngPss"
    is_default: true

##############################################################################

# Defaults to false; if set to true, RKE will not raise an error when an unsupported Docker version is detected
ignore_docker_version: true

# Set the name of the Kubernetes cluster
cluster_name: rke-cluster

kubernetes_version: v1.20.4-rancher1-1

ssh_key_path: /root/.ssh/id_ed25519
# ssh_key_path: /root/.ssh/id_rsa

# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true

services:
  etcd:
    backup_config:
      enabled: false
      interval_hours: 72
      retention: 3
      safe_timestamp: false
      timeout: 300
    creation: 12h
    extra_args:
      election-timeout: 5000
      heartbeat-interval: 500
    gid: 0
    retention: 72h
    snapshot: false
    uid: 0

  kube-api:
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-controller
    service_cluster_ip_range: 172.24.0.0/16
    # Expose a different port range for NodePort services
    service_node_port_range: 30000-40000
    always_pull_images: true
    pod_security_policy: false
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Enable audit log to stdout
      audit-log-path: "-"
      # Increase number of delete workers
      delete-collection-workers: 3
      # Set the level of log output to warning-level
      v: 1
  kube-controller:
    # CIDR pool used to assign IP addresses to pods in the cluster
    cluster_cidr: 172.28.0.0/16
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-api
    service_cluster_ip_range: 172.24.0.0/16
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Set the level of log output to debug-level
      v: 1
      # Enable RotateKubeletServerCertificate feature gate
      feature-gates: RotateKubeletServerCertificate=true
      # Enable TLS Certificates management
      # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
      cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
      cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
  kubelet:
    # Base domain for the cluster
    cluster_domain: cluster.local
    # IP address for the DNS service endpoint
    cluster_dns_server: 172.24.0.10
    # Fail if swap is on
    fail_swap_on: false
    # Raise max pods from the default 110 to 122
    extra_binds:
      - "/data/minio-pv:/hostStorage" # Do not change; bind added for the MinIO PV
    extra_args:
      max-pods: 122
    # Optionally define additional volume binds to a service
  scheduler:
    extra_args:
      # Set the level of log output to warning-level
      v: 0
  kubeproxy:
    extra_args:
      # Set the level of log output to warning-level
      v: 1

authorization:
  mode: rbac

addon_job_timeout: 30

# Specify network plug-in (canal, calico, flannel, weave, or none)
network:
  options:
    flannel_backend_type: vxlan
    flannel_iface: eth0
    flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
    flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
  plugin: flannel

# Specify DNS provider (coredns or kube-dns)
dns:
  provider: coredns
  nodelocal: {}
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 20%
      maxSurge: 15%
  linear_autoscaler_params:
    cores_per_replica: 0.34
    nodes_per_replica: 4
    prevent_single_point_failure: true
    min: 2
    max: 3

# Specify monitoring provider (metrics-server)
monitoring:
  provider: metrics-server
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 8

ingress:
  provider: nginx
  default_backend: true
  http_port: 0
  https_port: 0
  extra_envs:
    - name: TZ
      value: Asia/Shanghai
  node_selector:
    ingress-deploy: true
  options:
    use-forwarded-headers: "true"
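To bring the cluster up from this file, the usual RKE workflow looks like the following (a sketch; it assumes an rke binary compatible with v1.20.4-rancher1-1 is installed and passwordless SSH with /root/.ssh/id_ed25519 works to every node listed above):

rke up --config cluster.yml
# RKE writes the kubeconfig next to the cluster file
export KUBECONFIG=$PWD/kube_config_cluster.yml
kubectl get nodes -o wide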
40
53-202501-江西扩容/批量任务.sh
Normal file
@@ -0,0 +1,40 @@

rm octopus-agent_linux_amd64
wget http://42.192.52.227:9000/octopus/octopus-agent_linux_amd64

# The assignments below overwrite each other; only the last host_list takes effect.
host_list=(10.129.80.217)
host_list=(10.20.1.133 10.20.1.134)
host_list=(10.20.1.141 10.20.1.142 10.20.1.144 10.20.1.145)

for server in "${host_list[@]}"; do
    echo "current ip is $server"
    # scp /home/app/octopus-agent_linux_amd64 app@${server}:/home/app/octopus-agent_linux_amd64
    # scp /root/wdd/disk.sh root@${server}:/root/wdd/
    # ssh root@${server} "bash /root/wdd/disk.sh && lsblk"
    # scp /root/wdd/docker-amd64-20.10.15.tgz root@${server}:/root/wdd/docker-amd64-20.10.15.tgz
    # scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${server}:/root/wdd/
    # ssh root@${server} "printf 'firewall\n' | octopus-agent --mode=bastion"
    # ssh root@${server} "printf 'sysconfig\n' | octopus-agent --mode=bastion"
    # ssh root@${server} "printf 'swap\n' | octopus-agent --mode=bastion"
    # ssh root@${server} "printf 'selinux\n' | octopus-agent --mode=bastion"
    # ssh root@${server} "printf 'docker\n' | octopus-agent --mode=bastion"
    # ssh root@${server} "printf 'dockercompose\n' | octopus-agent --mode=bastion"
    # scp /etc/docker/daemon.json root@${server}:/etc/docker/daemon.json
    # ssh root@${server} "systemctl restart docker"
    # ssh root@${server} "docker info"
    ssh root@${server} "yum install -y nfs-utils && systemctl enable nfs-client"

done


rm -f /usr/local/bin/octopus-agent
mv -f /home/app/octopus-agent_linux_amd64 /usr/local/bin/octopus-agent
chmod +x /usr/local/bin/octopus-agent

printf 'sshkey\n' | octopus-agent --mode=bastion
printf 'sshconfig\n' | octopus-agent --mode=bastion


# Allow root login over SSH
cp /etc/ssh/sshd_config /etc/ssh/sshd_config_back
sed -i
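The same loop pattern can be reused to verify the result on every host, for example (a minimal sketch using the last host_list above):

for server in "${host_list[@]}"; do
    echo "checking $server"
    ssh root@${server} "rpm -q nfs-utils"
done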