This commit is contained in:
zeaslity
2025-03-14 13:48:54 +08:00
parent 77cafaf0a1
commit d8e2c67e36
38 changed files with 1051 additions and 39 deletions

View File

@@ -0,0 +1,100 @@
#!/bin/bash
# Disk layout before this script runs:
#   vda             252:0  0  500G 0 disk
#   |-vda1          252:1  0    1G 0 part /boot
#   `-vda2          252:2  0  119G 0 part
#     |-klas-root   253:0  0 74.7G 0 lvm  /
#     |-klas-swap   253:1  0  7.9G 0 lvm
#     `-klas-backup 253:2  0 36.4G 0 lvm
#
# Goals:
#   1. Disable /dev/klas/swap (and optionally /dev/klas/backup) and give the
#      freed space in vda2 to /dev/klas/root.
#   2. Partition the spare data disk and create a PV on it.
#   3. Create VG "datavg" on the PV using all of its space.
#   4. Create LV "docklv" on "datavg" using all of its space.
#   5. Format docklv as xfs.
#   6. Mount docklv at /var/lib/docker.
#   7. Persist the mount in /etc/fstab.

# 1. Remove the swap LV and grow root over the freed extents.
swapoff /dev/klas/swap
# -y answers lvremove's "Do you really want to remove..." prompt so the
# script runs non-interactively (the original blocked waiting for input).
lvremove -y /dev/klas/swap
# lvremove -y /dev/klas/backup   # uncomment to reclaim the backup LV too
lvextend -l +100%FREE /dev/klas/root
# The root filesystem is xfs, so grow it online with xfs_growfs.
xfs_growfs /dev/klas/root

# 2. Partition the data disk and create the PV.
# NOTE(review): the plan above mentions vda3 but the code operates on
# /dev/vdb — confirm which disk is the intended data disk.
if ! lsblk | grep -q vdb; then
    echo "/dev/vdb does not exist, skipping partition creation"
    exit 0
fi

echo "/dev/vdb exists, starting partition creation"
# GPT label, one partition spanning the whole disk, flagged for LVM.
# This replaces the previous fdisk here-string, which fed MBR-style answers
# ("p" for primary, type code 8e) to a GPT disk and omitted the answers for
# the partition-number/first-sector/last-sector prompts.
parted -s /dev/vdb mklabel gpt
parted -s /dev/vdb mkpart primary 0% 100%
parted -s /dev/vdb set 1 lvm on
partprobe
pvcreate /dev/vdb1
# 3. VG "datavg" over the whole PV.
vgcreate datavg /dev/vdb1
# 4. LV "docklv" over the whole VG.
lvcreate -l 100%FREE -n docklv datavg
# 5. Format as xfs.
mkfs.xfs /dev/datavg/docklv
# 6. Mount at /var/lib/docker.
mkdir -p /var/lib/docker
mount /dev/datavg/docklv /var/lib/docker
# 7. Persist the mount across reboots.
echo "/dev/datavg/docklv /var/lib/docker xfs defaults 0 0" >> /etc/fstab
echo ""
df -TH
echo ""
lsblk
echo ""

# --- Special handling for disks larger than 2 TiB ---------------------------
# Goal: convert /dev/vda to GPT, grow /dev/vda3 to the end of the disk, then
# grow the PV and the LV. Kept commented out: this is a manual, one-off
# procedure, and the original "parted /dev/vda mklabel gpt" would have WIPED
# the live system disk's partition table (mklabel destroys all partitions).
# sgdisk -g converts MBR to GPT in place, preserving existing partitions.
# sgdisk -g /dev/vda
# parted -s /dev/vda resizepart 3 100%
# pvresize /dev/vda3
# lvextend -l +100%FREE /dev/datavg/docklv
# # docklv is xfs (formatted above), so use xfs_growfs — resize2fs is for
# # ext2/3/4 and fails on xfs.
# xfs_growfs /dev/datavg/docklv

# Remove the fstab entries of the LVs deleted above so boot does not hang
# on the missing /dev/mapper/klas-backup and /dev/mapper/klas-swap devices.
sed -i '/^\/dev\/mapper\/klas-\(backup\|swap\)/d' /etc/fstab

View File

@@ -0,0 +1,260 @@
# RKE cluster definition.
nodes:
  - address: 172.16.100.55
    user: root
    role:
      - controlplane
      - etcd
      - worker
    internal_address: 172.16.100.55
    labels:
      # Kubernetes label values must be strings — quote boolean-looking values
      ingress-deploy: "true"
  - address: 172.16.100.51
    user: root
    role:
      - worker
    internal_address: 172.16.100.51
    labels:
      uavcloud.env: "eedsjc"
  - address: 172.16.100.52
    user: root
    role:
      - worker
    internal_address: 172.16.100.52
    labels:
      uavcloud.env: "eedsjc"
  - address: 172.16.100.53
    user: root
    role:
      - worker
    internal_address: 172.16.100.53
    labels:
      uavcloud.env: "eedsjc"
  - address: 172.16.100.56
    user: root
    role:
      - worker
    internal_address: 172.16.100.56
    labels:
      doris-deploy: "true"
  - address: 172.16.100.57
    user: root
    role:
      - worker
    internal_address: 172.16.100.57
    labels:
      doris-deploy: "true"
  - address: 172.16.100.58
    user: root
    role:
      - worker
    internal_address: 172.16.100.58
    labels:
      doris-deploy: "true"
  - address: 172.16.100.59
    user: root
    role:
      - worker
    internal_address: 172.16.100.59
    labels:
      uavcloud.env: "eedsjc-uavms"
  - address: 172.16.100.60
    user: root
    role:
      - worker
    internal_address: 172.16.100.60
    labels:
      uavcloud.env: "eedsjc-uavms"
  - address: 172.16.100.62
    user: root
    role:
      - worker
    internal_address: 172.16.100.62
    labels:
      uavcloud.env: "eedsjc-uavms"
authentication:
  strategy: x509
  sans:
    - "172.16.100.55"
private_registries:
  - url: 172.16.100.55:8033  # private registry address
    user: admin
    password: "V2ryStr@ngPss"
    is_default: true
##############################################################################
# Defaults to false; when true, RKE does not error out on an unsupported
# Docker version.
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
ssh_key_path: /root/.ssh/id_ed25519
# ssh_key_path: /root/.ssh/id_rsa
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
  etcd:
    backup_config:
      enabled: false
      interval_hours: 72
      retention: 3
      safe_timestamp: false
      timeout: 300
    creation: 12h
    extra_args:
      election-timeout: 5000
      heartbeat-interval: 500
    gid: 0
    retention: 72h
    snapshot: false
    uid: 0
  kube-api:
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-controller
    service_cluster_ip_range: 172.24.0.0/16
    # Expose a different port range for NodePort services
    service_node_port_range: 30000-40000
    always_pull_images: true
    pod_security_policy: false
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Enable audit log to stdout
      audit-log-path: "-"
      # Increase number of delete workers
      delete-collection-workers: 3
      # Set the level of log output to warning-level
      v: 1
  kube-controller:
    # CIDR pool used to assign IP addresses to pods in the cluster
    cluster_cidr: 172.28.0.0/16
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-api
    service_cluster_ip_range: 172.24.0.0/16
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Set the level of log output to debug-level
      v: 1
      # Enable RotateKubeletServerCertificate feature gate
      feature-gates: RotateKubeletServerCertificate=true
      # Enable TLS Certificates management
      # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
      cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
      cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
  kubelet:
    # Base domain for the cluster
    cluster_domain: cluster.local
    # IP address for the DNS service endpoint
    cluster_dns_server: 172.24.0.10
    # Fail if swap is on
    fail_swap_on: false
    # Optionally define additional volume binds to a service
    extra_binds:
      - "/data/minio-pv:/hostStorage"  # do not modify — added for the minio PV
    extra_args:
      max-pods: 122
  scheduler:
    extra_args:
      # Set the level of log output to warning-level
      v: 0
  kubeproxy:
    extra_args:
      # Set the level of log output to warning-level
      v: 1
authorization:
  mode: rbac
addon_job_timeout: 30
# Specify network plugin-in (canal, calico, flannel, weave, or none)
network:
  options:
    flannel_backend_type: vxlan
    flannel_iface: ens18
    flannel_autoscaler_priority_class_name: system-cluster-critical  # Available as of RKE v1.2.6+
    flannel_priority_class_name: system-cluster-critical  # Available as of RKE v1.2.6+
  plugin: calico
# Specify DNS provider (coredns or kube-dns)
dns:
  provider: coredns
  nodelocal: {}
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 20%
      maxSurge: 15%
  linear_autoscaler_params:
    cores_per_replica: 0.34
    nodes_per_replica: 4
    prevent_single_point_failure: true
    min: 2
    max: 3
# Specify monitoring provider (metrics-server)
monitoring:
  provider: metrics-server
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 8
ingress:
  provider: nginx
  default_backend: true
  http_port: 0
  https_port: 0
  extra_envs:
    - name: TZ
      value: Asia/Shanghai
  node_selector:
    # Selector values are strings — must match the node label "true" above
    ingress-deploy: "true"
  options:
    use-forwarded-headers: "true"
    access-log-path: /var/log/nginx/access.log
    client-body-timeout: '6000'
    compute-full-forwarded-for: 'true'
    enable-underscores-in-headers: 'true'
    log-format-escape-json: 'true'
    log-format-upstream: >-
      { "msec": "$msec", "connection": "$connection", "connection_requests":
      "$connection_requests", "pid": "$pid", "request_id": "$request_id",
      "request_length": "$request_length", "remote_addr": "$remote_addr",
      "remote_user": "$remote_user", "remote_port": "$remote_port",
      "http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
      "$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
      "request_uri": "$request_uri", "args": "$args", "status": "$status",
      "body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
      "http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
      "http_host": "$http_host", "server_name": "$server_name", "request_time":
      "$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
      "$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
      "upstream_response_time": "$upstream_response_time",
      "upstream_response_length": "$upstream_response_length",
      "upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
      "$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
      "request_method": "$request_method", "server_protocol": "$server_protocol",
      "pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
      "geoip_country_code": "$geoip_country_code" }
    proxy-body-size: 5120m
    proxy-read-timeout: '6000'
    proxy-send-timeout: '6000'

View File

@@ -0,0 +1,80 @@
#!/bin/bash
# Disk layout before this script runs:
#   vda             252:0  0  500G 0 disk
#   |-vda1          252:1  0    1G 0 part /boot
#   `-vda2          252:2  0  119G 0 part
#     |-klas-root   253:0  0 74.7G 0 lvm  /
#     |-klas-swap   253:1  0  7.9G 0 lvm
#     `-klas-backup 253:2  0 36.4G 0 lvm
#
# Goals:
#   1. Disable /dev/klas/swap (and optionally /dev/klas/backup) and give the
#      freed space in vda2 to /dev/klas/root.
#   2. Partition the spare data disk and create a PV on it.
#   3. Create VG "datavg" on the PV using all of its space.
#   4. Create LV "docklv" on "datavg" using all of its space.
#   5. Format docklv as xfs.
#   6. Mount docklv at /var/lib/docker.
#   7. Persist the mount in /etc/fstab.

# 1. Remove the swap LV and grow root over the freed extents.
# (Fixed: original used the misspelled VG path /dev/kals/... — the VG is
# "klas", so every one of those commands failed with "not found".)
swapoff /dev/klas/swap
# -y answers the confirmation prompt; the original piped `echo "y\n"`, which
# without -e sends the literal characters 'y', '\', 'n' — not a confirmation.
lvremove -y /dev/klas/swap
# lvremove -y /dev/klas/backup   # uncomment to reclaim the backup LV too
lvextend -l +100%FREE /dev/klas/root
# The root filesystem is xfs, so grow it online with xfs_growfs.
xfs_growfs /dev/klas/root

# 2. Partition the data disk and create the PV.
if ! lsblk | grep -q vdb; then
    echo ""
    echo "/dev/vdb does not exist, skipping partition creation"
    echo ""
    df -TH
    echo ""
    lsblk
    echo ""
    exit 0
fi

echo "/dev/vdb exists, starting partition creation"
# GPT label, one partition spanning the whole disk, flagged for LVM.
# This replaces the previous fdisk here-string, which fed MBR-style answers
# ("p" for primary, type code 8e) to a GPT disk and omitted the answers for
# the partition-number/first-sector/last-sector prompts.
parted -s /dev/vdb mklabel gpt
parted -s /dev/vdb mkpart primary 0% 100%
parted -s /dev/vdb set 1 lvm on
partprobe
pvcreate /dev/vdb1
# 3. VG "datavg" over the whole PV.
vgcreate datavg /dev/vdb1
# 4. LV "docklv" over the whole VG.
lvcreate -l 100%FREE -n docklv datavg
# 5. Format as xfs.
mkfs.xfs /dev/datavg/docklv
# 6. Mount at /var/lib/docker.
mkdir -p /var/lib/docker
mount /dev/datavg/docklv /var/lib/docker
# 7. Persist the mount across reboots.
echo "/dev/datavg/docklv /var/lib/docker xfs defaults 0 0" >> /etc/fstab
echo ""
df -TH
echo ""
lsblk
echo ""

View File

@@ -0,0 +1,194 @@
---
# Static local PVs/PVCs for Doris on nodes .56/.57/.58.
# NOTE(review): the 9000Gi and 500Gi PVs that land on the same node all use
# the same local path /var/lib/docker/doris — two PVs backed by one directory
# will collide; confirm the intended per-PV paths.
# NOTE(review): only doris-1-500g-pvc is declared; doris-2-500g-pvc and
# doris-3-500g-pvc are missing — confirm whether that is intentional.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: doris-1-9000g-pv
spec:
  capacity:
    storage: 9000Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: doris-static-storage
  local:
    path: /var/lib/docker/doris
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - 172.16.100.56
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: doris-2-9000g-pv
spec:
  capacity:
    storage: 9000Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: doris-static-storage
  local:
    path: /var/lib/docker/doris
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - 172.16.100.57
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: doris-3-9000g-pv
spec:
  capacity:
    storage: 9000Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: doris-static-storage
  local:
    path: /var/lib/docker/doris
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - 172.16.100.58
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: doris-1-500g-pv
spec:
  capacity:
    storage: 500Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: doris-static-storage
  local:
    path: /var/lib/docker/doris
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - 172.16.100.56
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: doris-2-500g-pv
spec:
  capacity:
    storage: 500Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: doris-static-storage
  local:
    path: /var/lib/docker/doris
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - 172.16.100.57
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: doris-3-500g-pv
spec:
  capacity:
    storage: 500Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: doris-static-storage
  local:
    path: /var/lib/docker/doris
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - 172.16.100.58
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-1-9000g-pvc
spec:
  storageClassName: doris-static-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 9000Gi
  volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-2-9000g-pvc
spec:
  storageClassName: doris-static-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 9000Gi
  volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-3-9000g-pvc
spec:
  storageClassName: doris-static-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 9000Gi
  volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-1-500g-pvc
spec:
  storageClassName: doris-static-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 500Gi
  volumeMode: Filesystem

View File

@@ -0,0 +1,6 @@
---
# StorageClass for statically provisioned Doris local PVs.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: doris-static-storage
# no-provisioner: PVs are created by hand, not dynamically.
provisioner: kubernetes.io/no-provisioner
# Delay binding until a consuming pod is scheduled, so the scheduler can
# honor each local PV's node affinity.
volumeBindingMode: WaitForFirstConsumer

View File

@@ -0,0 +1,43 @@
---
# NOTE(review): this file re-declares StorageClass "doris-static-storage"
# with volumeBindingMode: Immediate, while the sibling file declares the same
# name with WaitForFirstConsumer. volumeBindingMode is immutable, so applying
# both will fail — keep exactly one definition.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: doris-static-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: Immediate
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: doris-1-500g-pv
spec:
  capacity:
    storage: 500Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: doris-static-storage
  local:
    path: /var/lib/docker/doris
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - 172.16.100.56
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-1-500g-pvc
spec:
  storageClassName: doris-static-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 500Gi
  volumeMode: Filesystem