Yanqing + Shandong projects
35
44-202411-厦门移动扩容/0.0-dependencies.sh
Normal file
@@ -0,0 +1,35 @@
#!/usr/bin/env bash

# Run this on every node.

# Set this node's hostname (storage-1 here; use the matching name on each node).
hostnamectl set-hostname storage-1
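
# Per-node variant (sketch, not executed here): instead of hard-coding the name,
# look this node's primary IP up in the host map written further below and set the
# hostname from it. Assumes the map is already in /etc/hosts and that the first
# address from `hostname -I` is the cluster-facing 192.168.0.x address.
# node_ip=$(hostname -I | awk '{print $1}')
# node_name=$(awk -v ip="${node_ip}" '$1 == ip {print $2}' /etc/hosts)
# [ -n "${node_name}" ] && hostnamectl set-hostname "${node_name}"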

# Add a public resolver (Alibaba DNS) after the search line.
sed -i "/search/ a nameserver 223.5.5.5" /etc/resolv.conf

# Allow TCP forwarding over SSH, then restart sshd to apply it.
echo "AllowTcpForwarding yes" >> /etc/ssh/sshd_config
systemctl restart sshd

cat > /etc/hosts << EOF
192.168.0.8 master-node
192.168.0.65 worker-1
192.168.0.45 worker-2
192.168.0.7 worker-3
192.168.0.9 worker-4
192.168.0.10 worker-5
192.168.0.11 worker-6
192.168.0.84 worker-7
192.168.0.85 worker-8
192.168.0.86 worker-9
192.168.0.2 storage-1
192.168.0.15 gpu-1
192.168.0.12 gpu-2
192.168.0.13 gpu-3
EOF
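
# Quick sanity check (sketch): ping each host in the map once and report failures.
# Assumes ICMP is allowed between the nodes.
# awk '/^192\.168\.0\./ {print $1, $2}' /etc/hosts | while read -r ip name; do
#   ping -c1 -W1 "${ip}" >/dev/null && echo "OK   ${name} (${ip})" || echo "FAIL ${name} (${ip})"
# done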

# Refresh the yum metadata cache.
yum clean all && yum makecache

# 36.134.71.138

# Lingyun@443

75
44-202411-厦门移动扩容/0.1-mountNodeVolume.sh
Normal file
@@ -0,0 +1,75 @@
#! /bin/bash

# Disable swap and remove the swap entry from /etc/fstab (keeping a backup).
swapoff -a
cp -f /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak | grep -v swap >/etc/fstab
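
# Verification (sketch): once swap is fully disabled, `swapon --show` prints nothing.
# swapon --show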

# echo "-----------------------------------------------------------------------"
# RootVolumeSizeBefore=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "Root volume size before resizing: ${RootVolumeSizeBefore}"

# echo "y


# " | lvremove /dev/mapper/centos-swap

# freepesize=$(vgdisplay centos | grep 'Free PE' | awk '{print $5}')

# lvextend -l+${freepesize} /dev/mapper/centos-root


# ## # Grow the XFS filesystem to the maximum available size
# xfs_growfs /dev/mapper/centos-root

# df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}'

# echo "-----------------------------------------------------------------------"
# RootVolumeSizeAfter=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "Root volume size after resizing: ${RootVolumeSizeAfter}"
# RootVolumeSizeBeforeNum=$(echo $RootVolumeSizeBefore | cut -d "G" -f1)
# RootVolumeSizeAfterNum=$(echo $RootVolumeSizeAfter | cut -d "G" -f1)

# echo "Congratulations: your root volume grew by +++++++$(( ${RootVolumeSizeAfterNum}-${RootVolumeSizeBeforeNum} ))GB+++++"

echo ""
|
||||
echo ""
|
||||
echo ""
|
||||
echo "-----------------------------------------------------------------------"
|
||||
|
||||
export VG_NAME=datavg
|
||||
|
||||
echo "n
|
||||
p
|
||||
|
||||
|
||||
|
||||
t
|
||||
|
||||
8e
|
||||
w
|
||||
" | fdisk /dev/vdb
|
||||
partprobe
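# Non-interactive alternative (sketch, not executed here): sfdisk can create the
# same single LVM-typed partition without piping keystrokes into fdisk, assuming
# /dev/vdb carries a DOS partition table:
#   echo 'type=8e' | sfdisk /dev/vdb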
# If the volume group already exists, extend it instead:
# vgextend /dev/mapper/centos /dev/vda3
vgcreate ${VG_NAME} /dev/vdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# Adjust the size to the actual situation
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
mkdir -p /var/lib/docker
selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
# export selffstab="/dev/mapper/${VG_NAME}-lvdata /data xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
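# Sanity check (sketch): confirm /var/lib/docker is now backed by the new logical volume.
# findmnt --target /var/lib/docker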

echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"

# To grow the root filesystem instead: find the device to extend via `df -Th`
# (e.g. ${VG_NAME}-root), then:
# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
# xfs_growfs /dev/mapper/${VG_NAME}-root
280
44-202411-厦门移动扩容/rke-cluster.yml
Normal file
@@ -0,0 +1,280 @@
nodes:
  - address: 192.168.0.8
    user: rke-installer
    role:
      - controlplane
      - etcd
      - worker
    internal_address: 192.168.0.8
    labels:
      ingress-deploy: true
      uavcloud.env: demo
  - address: 192.168.0.65
    user: rke-installer
    role:
      - worker
    internal_address: 192.168.0.65
    labels:
      uavcloud.env: demo
  - address: 192.168.0.45
    user: rke-installer
    role:
      - worker
    internal_address: 192.168.0.45
    labels:
      uavcloud.env: demo
  - address: 192.168.0.7
    user: rke-installer
    role:
      - worker
    internal_address: 192.168.0.7
    labels:
      mysql-deploy: true
      uavcloud.env: demo
  - address: 192.168.0.9
    user: rke-installer
    role:
      - worker
    internal_address: 192.168.0.9
    labels:
      uavcloud.env: demo
  - address: 192.168.0.10
    user: rke-installer
    role:
      - worker
    internal_address: 192.168.0.10
    labels:
      uavcloud.env: demo
  - address: 192.168.0.11
    user: rke-installer
    role:
      - worker
    internal_address: 192.168.0.11
    labels:
      uavcloud.env: demo
  - address: 192.168.0.83
    user: rke-installer
    role:
      - worker
    internal_address: 192.168.0.83
    labels:
      uavcloud.env: demo
  - address: 192.168.0.84
    user: rke-installer
    role:
      - worker
    internal_address: 192.168.0.84
    labels:
      uavcloud.env: demo
  - address: 192.168.0.85
    user: rke-installer
    role:
      - worker
    internal_address: 192.168.0.85
    labels:
      uavcloud.env: demo

authentication:
  strategy: x509
  sans:
    - "192.168.0.8"

private_registries:
  - url: 192.168.0.8:8033 # private registry address
    user: admin
    password: "V2ryStr@ngPss"
    is_default: true
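# Quick check (sketch): from any node, confirm the registry above is reachable and
# the credentials work before running rke up, e.g.
#   docker login 192.168.0.8:8033 -u admin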

##############################################################################

# Defaults to false; if set to true, RKE will not raise an error when it detects
# an unsupported Docker version
ignore_docker_version: true

# Set the name of the Kubernetes cluster
cluster_name: rke-cluster

kubernetes_version: v1.20.4-rancher1-1

ssh_key_path: /home/rke-installer/.ssh/id_ed25519

# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true

services:
  etcd:
    backup_config:
      enabled: false
      interval_hours: 72
      retention: 3
      safe_timestamp: false
      timeout: 300
    creation: 12h
    extra_args:
      election-timeout: 5000
      heartbeat-interval: 500
      cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
    gid: 0
    retention: 72h
    snapshot: false
    uid: 0

  kube-api:
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-controller
    service_cluster_ip_range: 10.74.0.0/16
    # Expose a different port range for NodePort services
    service_node_port_range: 30000-40000
    always_pull_images: true
    pod_security_policy: false
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Enable audit log to stdout
      audit-log-path: "-"
      # Increase number of delete workers
      delete-collection-workers: 3
      # Set the level of log output to warning-level
      v: 1
  kube-controller:
    # CIDR pool used to assign IP addresses to pods in the cluster
    cluster_cidr: 10.100.0.0/16
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-api
    service_cluster_ip_range: 10.74.0.0/16
    # Add additional arguments to the Kubernetes controller manager
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Set the level of log output to debug-level
      v: 1
      # Enable RotateKubeletServerCertificate feature gate
      feature-gates: RotateKubeletServerCertificate=true
      # Enable TLS Certificates management
      # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
      cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
      cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
  kubelet:
    # Base domain for the cluster
    cluster_domain: cluster.local
    # IP address for the DNS service endpoint
    cluster_dns_server: 10.74.0.10
    # Fail if swap is on
    fail_swap_on: false
    # Optionally define additional volume binds to a service
    extra_binds:
      - "/data/minio-pv:/hostStorage" # do not modify; added for the MinIO PV
    # Set max pods to 122 instead of the default 110
    extra_args:
      max-pods: 122
  scheduler:
    extra_args:
      # Set the level of log output to warning-level
      v: 0
      tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
  kubeproxy:
    extra_args:
      # Set the level of log output to warning-level
      v: 1

authorization:
  mode: rbac

addon_job_timeout: 30

# Specify network plug-in (canal, calico, flannel, weave, or none)
network:
  mtu: 1440
  options:
    flannel_backend_type: vxlan
  plugin: calico
  tolerations:
    - key: "node.kubernetes.io/unreachable"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300
    - key: "node.kubernetes.io/not-ready"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300

# Specify DNS provider (coredns or kube-dns)
dns:
  provider: coredns
  nodelocal: {}
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 20%
      maxSurge: 15%
  linear_autoscaler_params:
    cores_per_replica: 0.34
    nodes_per_replica: 4
    prevent_single_point_failure: true
    min: 2
    max: 3
  tolerations:
    - key: "node.kubernetes.io/unreachable"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300
    - key: "node.kubernetes.io/not-ready"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300

# Specify monitoring provider (metrics-server)
monitoring:
  provider: metrics-server
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 8

ingress:
  provider: nginx
  default_backend: true
  http_port: 0
  https_port: 0
  extra_envs:
    - name: TZ
      value: Asia/Shanghai
  node_selector:
    ingress-deploy: true
  options:
    use-forwarded-headers: "true"
    access-log-path: /var/log/nginx/access.log
    # client-body-timeout: '6000'
    # compute-full-forwarded-for: 'true'
    # enable-underscores-in-headers: 'true'
    # log-format-escape-json: 'true'
    # log-format-upstream: >-
    #   { "msec": "$msec", "connection": "$connection", "connection_requests":
    #   "$connection_requests", "pid": "$pid", "request_id": "$request_id",
    #   "request_length": "$request_length", "remote_addr": "$remote_addr",
    #   "remote_user": "$remote_user", "remote_port": "$remote_port",
    #   "http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
    #   "$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
    #   "request_uri": "$request_uri", "args": "$args", "status": "$status",
    #   "body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
    #   "http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
    #   "http_host": "$http_host", "server_name": "$server_name", "request_time":
    #   "$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
    #   "$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
    #   "upstream_response_time": "$upstream_response_time",
    #   "upstream_response_length": "$upstream_response_length",
    #   "upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
    #   "$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
    #   "request_method": "$request_method", "server_protocol": "$server_protocol",
    #   "pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
    #   "geoip_country_code": "$geoip_country_code" }
    # proxy-body-size: 5120m
    # proxy-read-timeout: '6000'
    # proxy-send-timeout: '6000'
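
# To apply this cluster definition (sketch): run the RKE CLI from the directory
# containing this file as the rke-installer user, e.g.
#   rke up --config rke-cluster.yml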
79
44-202411-厦门移动扩容/批量命令.sh
Normal file
@@ -0,0 +1,79 @@
#!/bin/bash


# Pick the target host list; each assignment below overrides the previous one,
# so keep only the list you actually want to run against.
all_server_list=(192.168.0.10)

all_server_list=(192.168.0.9 192.168.0.10 192.168.0.11 192.168.0.65 192.168.0.45 192.168.0.7)

all_server_list=(192.168.0.83 192.168.0.84 192.168.0.85)

# Connectivity check: SSH into each host and curl an internal service address.
for server in "${all_server_list[@]}";do
    echo "server is ${server}"

    ssh ${server} "echo yes"
    ssh ${server} "curl -s 10.74.32.6"

    echo ""
done

# Prevent new pods from being scheduled onto these nodes: taint and cordon them.
kubectl taint nodes 192.168.0.9 key=experimental:NoSchedule
kubectl taint nodes 192.168.0.10 key=experimental:NoSchedule
kubectl taint nodes 192.168.0.11 key=experimental:NoSchedule

kubectl cordon 192.168.0.9
kubectl cordon 192.168.0.10
kubectl cordon 192.168.0.11
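
# To undo later (sketch): uncordon the node and remove the taint, e.g.
#   kubectl uncordon 192.168.0.9
#   kubectl taint nodes 192.168.0.9 key=experimental:NoSchedule-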


# Bootstrap each node over SSH: copy keys, install the octopus-agent helper, and
# run its bastion-mode setup steps.
scp /root/.ssh/* root@${server}:/root/.ssh/

ssh root@${server} "mkdir /root/wdd"
scp /usr/local/bin/octopus-agent root@${server}:/usr/local/bin/octopus-agent
ssh root@${server} "chmod +x /usr/local/bin/octopus-agent"
ssh root@${server} "printf 'ssh\n' | octopus-agent --mode=bastion"

scp /root/wdd/disk.sh root@${server}:/root/wdd/disk.sh
ssh root@${server} "bash /root/wdd/disk.sh"

ssh root@${server} "mkdir /root/wdd"
scp /usr/local/bin/octopus-agent root@${server}:/usr/local/bin/octopus-agent
ssh root@${server} "chmod +x /usr/local/bin/octopus-agent"
ssh root@${server} "printf 'ssh\n' | octopus-agent --mode=bastion"

scp /root/wdd/docker-amd64-20.10.15.tgz root@${server}:/root/wdd/
scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${server}:/root/wdd/
ssh root@${server} "printf 'firewall\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'sysconfig\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'swap\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'selinux\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'docker\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'dockercompose\n' | octopus-agent --mode=bastion"


scp /etc/docker/daemon.json root@${server}:/etc/docker/

ssh root@${server} "systemctl restart docker && sleep 3 && docker info"

# Set the local rke-installer password (entered twice for the confirmation prompt).
echo "SuperRke.123
SuperRke.123
" | passwd rke-installer


# Create the rke-installer user on the node, copy its SSH keys, and add it to the
# docker group (required for RKE to manage Docker over SSH).
ssh root@${server} "useradd rke-installer"
ssh root@${server} "mkdir /home/rke-installer"
ssh root@${server} "mkdir /home/rke-installer/.ssh "
scp /home/rke-installer/.ssh/* root@${server}:/home/rke-installer/.ssh/
ssh root@${server} "chown rke-installer:rke-installer -R /home/rke-installer"
ssh root@${server} "usermod -d /home/rke-installer rke-installer"

ssh root@${server} "gpasswd -a rke-installer docker"
ssh root@${server} "newgrp docker"


echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCrEuk9I6spsHv03cSFQmhx0HZ90VBtqBCGlaisasJAK7wkam/AStAvB8Svn6GiNyJFp/vTr2C8MN01poJQ2pWVmj+WcIWVdbrwYmvIsLC6A4dtdxGOEj5Oejzod9JavAwz+EhTjy61T1Xyc7LSafYNtfrir7X7rCZhSN+GhdD9nN0TFbpLax4t84QDU8+dyALzmGEzbPKFZ2BBpbiNu8I0nheRojMdvwCIjV/c+RNEwXKOOk4EXch9mW4HJJ/n36mfC6Dq3CaDaX+IGuK/lgim2wDQFUFfeGWSgiERDZN2sFyxeyliBL39J7gQFXFcVXud+TCujKgpdzmyb+AulKFrUja2kSoA8CLmyiaRkha6SQXkaT/+KVE2q8mNeA6DDxlRR3rBB8MHJ6IKz1+SgwIip8t/ybgthmkBbFWXvjVfIK3cfmMaxqLTlJNEhngLRItLxOa96GivCSziY8CNidWlL4ekC4WF8X+zseBhnFCPG1hmbi4bMcw86CTIFgNhc20= rke-installer@master-node" >>/home/rke-installer/.ssh/authorized_keys

# Repoint the data LV mount from /data to /var/lib/docker in /etc/fstab.
# (Use '#' as the sed delimiter because the replacement contains slashes.)
sed -i "s#/data#/var/lib/docker#g" /etc/fstab

umount /data
mount -a
df -TH