This commit is contained in:
zeaslity
2025-03-14 13:48:54 +08:00
parent 77cafaf0a1
commit d8e2c67e36
38 changed files with 1051 additions and 39 deletions

View File

@@ -6,17 +6,12 @@ worker_server_list=(192.168.35.55 192.168.35.86 192.168.35.89 192.168.35.93 192.
all_server_list=(192.168.35.105 192.168.35.114 192.168.35.115 192.168.35.55 192.168.35.86 192.168.35.89 192.168.35.93 192.168.35.95 192.168.35.96 192.168.35.101 192.168.35.103 192.168.35.104)
all_server_list=(dev-worker-01 dev-worker-02 dev-worker-03 dev-worker-05 dev-worker-06 dev-worker-07 dev-worker-08 dev-worker-09)
-all_server_list=(192.168.35.105 192.168.35.114 192.168.35.115 192.168.35.55 192.168.35.86 192.168.35.89 192.168.35.93 192.168.35.95 192.168.35.96 192.168.35.101 192.168.35.103 192.168.35.104)
+all_server_list=(192.168.35.105 192.168.35.114 192.168.35.115 192.168.35.55 192.168.35.86 192.168.35.89 192.168.35.93 192.168.35.95 192.168.35.96 192.168.35.101 192.168.35.103 192.168.35.104 192.168.40.53 192.168.40.54 192.168.40.55)
for server in "${all_server_list[@]}";do
echo "server is ${server}"
-ssh root@"${server}" "df -TH"
+ssh root@"${server}" "timedatectl status"
done

View File

@@ -0,0 +1,159 @@
server is 192.168.35.105
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:29 CST
Universal time: Wed 2025-03-12 07:31:29 UTC
RTC time: Wed 2025-03-12 06:48:28
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.114
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:30 CST
Universal time: Wed 2025-03-12 07:31:30 UTC
RTC time: Wed 2025-03-12 06:48:29
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.115
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:30 CST
Universal time: Wed 2025-03-12 07:31:30 UTC
RTC time: Wed 2025-03-12 06:48:29
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.55
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:30 CST
Universal time: Wed 2025-03-12 07:31:30 UTC
RTC time: Wed 2025-03-12 06:48:29
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.86
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:31 CST
Universal time: Wed 2025-03-12 07:31:31 UTC
RTC time: Wed 2025-03-12 06:48:29
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.89
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:31 CST
Universal time: Wed 2025-03-12 07:31:31 UTC
RTC time: Wed 2025-03-12 06:48:31
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.93
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:32 CST
Universal time: Wed 2025-03-12 07:31:32 UTC
RTC time: Wed 2025-03-12 06:48:31
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.95
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:32 CST
Universal time: Wed 2025-03-12 07:31:32 UTC
RTC time: Wed 2025-03-12 06:48:31
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.96
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:33 CST
Universal time: Wed 2025-03-12 07:31:33 UTC
RTC time: Wed 2025-03-12 06:48:32
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.101
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:33 CST
Universal time: Wed 2025-03-12 07:31:33 UTC
RTC time: Wed 2025-03-12 06:48:32
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.103
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:34 CST
Universal time: Wed 2025-03-12 07:31:34 UTC
RTC time: Wed 2025-03-12 06:48:32
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.104
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:34 CST
Universal time: Wed 2025-03-12 07:31:34 UTC
RTC time: Wed 2025-03-12 06:48:32
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.40.53
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:36 CST
Universal time: Wed 2025-03-12 07:31:36 UTC
RTC time: Wed 2025-03-12 07:31:36
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.40.54
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:39 CST
Universal time: Wed 2025-03-12 07:31:39 UTC
RTC time: Wed 2025-03-12 07:31:39
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.40.55
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:43 CST
Universal time: Wed 2025-03-12 07:31:43 UTC
RTC time: Wed 2025-03-12 07:31:43
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no

View File

@@ -0,0 +1,18 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: nfs-backend-log-pvc
namespace: gsyd-app
labels:
app.kubernetes.io/version: 6.0.0
cmii.app: nfs-backend-log-pvc
cmii.type: middleware-base
helm.sh/chart: all-persistence-volume-claims-1.1.0
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 100Gi
storageClassName: nfs-prod-distribute
volumeMode: Filesystem
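# Usage sketch (not part of this manifest): apply the claim and check that it binds.
# Assumptions: kubectl access to the cluster, the nfs-prod-distribute StorageClass already
# exists, and the manifest is saved locally as nfs-backend-log-pvc.yaml (hypothetical name).
#   kubectl apply -f nfs-backend-log-pvc.yaml
#   kubectl -n gsyd-app get pvc nfs-backend-log-pvc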

View File

@@ -0,0 +1,100 @@
#!/bin/bash
# vda 252:0 0 500G 0 disk
# ├─vda1 252:1 0 1G 0 part /boot
# └─vda2 252:2 0 119G 0 part
# ├─klas-root 253:0 0 74.7G 0 lvm /
# ├─klas-swap 253:1 0 7.9G 0 lvm
# └─klas-backup 253:2 0 36.4G 0 lvm
# The system's disks are laid out as shown above. Provide shell commands that:
# 1. Disable /dev/klas/swap and /dev/klas/backup and give all of vda2's space to /dev/klas/root
# 2. Create a /vda3 partition from the remaining space on vda and create PV /dev/vda3
# 3. Create VG datavg on PV /dev/vda3, using all of its space
# 4. Create LV docklv on VG datavg, using all of its space
# 5. Format LV docklv as xfs
# 6. Mount LV docklv at /var/lib/docker
# 7. Make the docklv mount permanent
# 1. Disable /dev/klas/swap and /dev/klas/backup and give all of vda2's space to /dev/klas/root
swapoff /dev/klas/swap
lvremove -y /dev/klas/swap  # -y skips the interactive confirmation
# lvremove /dev/klas/backup
lvextend -l +100%FREE /dev/klas/root
xfs_growfs /dev/klas/root
# 2. Create a new partition from the remaining disk space and create a PV on it (the data disk /dev/vdb below)
if ! lsblk | grep -q vdb; then
echo "/dev/vdb does not exist, skipping partition creation"
exit 0
fi
# Set the partition table of /dev/vdb to GPT
echo yes | parted /dev/vdb mklabel gpt
echo "/dev/vdb exists, starting partition creation"
echo "n
p
t
8e
w
" | fdisk /dev/vdb
partprobe
pvcreate /dev/vdb1
# 3. Create VG datavg on PV /dev/vdb1, using all of its space
vgcreate datavg /dev/vdb1
# 4. Create LV docklv on VG datavg, using all of its space
lvcreate -l 100%FREE -n docklv datavg
# 5. Format LV docklv as xfs
mkfs.xfs /dev/datavg/docklv
# 6. Mount LV docklv at /var/lib/docker
mkdir -p /var/lib/docker
mount /dev/datavg/docklv /var/lib/docker
# 7. Make the docklv mount permanent
echo "/dev/datavg/docklv /var/lib/docker xfs defaults 0 0" >> /etc/fstab
echo ""
df -TH
echo ""
lsblk
echo ""
# Special case: disks larger than 2 TB
# Provide shell commands that:
# 1. Convert the partition table of /dev/vda to GPT
# 2. Grow the /dev/vda3 partition into the remaining disk space
# 3. Grow the VG datavg
# 4. Grow the LV docklv
# 1. Convert the partition table of /dev/vda to GPT
parted /dev/vda mklabel gpt
# 2. Grow the /dev/vda3 partition into the remaining disk space
parted /dev/vda resizepart 3 100%
# 3. Grow the VG datavg
pvresize /dev/vda3
# 4. Grow the LV docklv
lvextend -l +100%FREE /dev/datavg/docklv
xfs_growfs /var/lib/docker  # docklv was formatted as xfs, so grow it with xfs_growfs rather than resize2fs
# Provide a shell command that:
# removes the lines in /etc/fstab that start with /dev/mapper/klas-backup or /dev/mapper/klas-swap
sed -i '/^\/dev\/mapper\/klas-\(backup\|swap\)/d' /etc/fstab
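# Verification sketch (a suggestion, not part of the original steps): confirm the new
# volume and the fstab entry survive a remount before rebooting.
mount -a
findmnt /var/lib/docker
grep docklv /etc/fstab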

View File

@@ -0,0 +1,260 @@
nodes:
- address: 172.16.100.55
user: root
role:
- controlplane
- etcd
- worker
internal_address: 172.16.100.55
labels:
ingress-deploy: true
- address: 172.16.100.51
user: root
role:
- worker
internal_address: 172.16.100.51
labels:
uavcloud.env: "eedsjc"
- address: 172.16.100.52
user: root
role:
- worker
internal_address: 172.16.100.52
labels:
uavcloud.env: "eedsjc"
- address: 172.16.100.53
user: root
role:
- worker
internal_address: 172.16.100.53
labels:
uavcloud.env: "eedsjc"
- address: 172.16.100.56
user: root
role:
- worker
internal_address: 172.16.100.56
labels:
doris-deploy: true
- address: 172.16.100.57
user: root
role:
- worker
internal_address: 172.16.100.57
labels:
doris-deploy: true
- address: 172.16.100.58
user: root
role:
- worker
internal_address: 172.16.100.58
labels:
doris-deploy: true
- address: 172.16.100.59
user: root
role:
- worker
internal_address: 172.16.100.59
labels:
uavcloud.env: "eedsjc-uavms"
- address: 172.16.100.60
user: root
role:
- worker
internal_address: 172.16.100.60
labels:
uavcloud.env: "eedsjc-uavms"
- address: 172.16.100.62
user: root
role:
- worker
internal_address: 172.16.100.62
labels:
uavcloud.env: "eedsjc-uavms"
authentication:
strategy: x509
sans:
- "172.16.100.55"
private_registries:
- url: 172.16.100.55:8033 # private registry address
user: admin
password: "V2ryStr@ngPss"
is_default: true
##############################################################################
# Defaults to false; if set to true, RKE will not fail when an unsupported Docker version is detected
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
ssh_key_path: /root/.ssh/id_ed25519
#ssh_key_path: /root/.ssh/id_rsa
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
etcd:
backup_config:
enabled: false
interval_hours: 72
retention: 3
safe_timestamp: false
timeout: 300
creation: 12h
extra_args:
election-timeout: 5000
heartbeat-interval: 500
gid: 0
retention: 72h
snapshot: false
uid: 0
kube-api:
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-controller
service_cluster_ip_range: 172.24.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
always_pull_images: true
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Enable audit log to stdout
audit-log-path: "-"
# Increase number of delete workers
delete-collection-workers: 3
# Set the level of log output to warning-level
v: 1
kube-controller:
# CIDR pool used to assign IP addresses to pods in the cluster
cluster_cidr: 172.28.0.0/16
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-api
service_cluster_ip_range: 172.24.0.0/16
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Set the level of log output to debug-level
v: 1
# Enable RotateKubeletServerCertificate feature gate
feature-gates: RotateKubeletServerCertificate=true
# Enable TLS Certificates management
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
kubelet:
# Base domain for the cluster
cluster_domain: cluster.local
# IP address for the DNS service endpoint
cluster_dns_server: 172.24.0.10
# Fail if swap is on
fail_swap_on: false
# Set max pods to 122 instead of the default 110
extra_binds:
- "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
extra_args:
max-pods: 122
# Optionally define additional volume binds to a service
scheduler:
extra_args:
# Set the level of log output to warning-level
v: 0
kubeproxy:
extra_args:
# Set the level of log output to warning-level
v: 1
authorization:
mode: rbac
addon_job_timeout: 30
# Specify network plugin-in (canal, calico, flannel, weave, or none)
network:
options:
flannel_backend_type: vxlan
flannel_iface: ens18
flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
plugin: calico
# Specify DNS provider (coredns or kube-dns)
dns:
provider: coredns
nodelocal: {}
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
maxSurge: 15%
linear_autoscaler_params:
cores_per_replica: 0.34
nodes_per_replica: 4
prevent_single_point_failure: true
min: 2
max: 3
# Specify monitoring provider (metrics-server)
monitoring:
provider: metrics-server
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 8
ingress:
provider: nginx
default_backend: true
http_port: 0
https_port: 0
extra_envs:
- name: TZ
value: Asia/Shanghai
node_selector:
ingress-deploy: true
options:
use-forwarded-headers: "true"
access-log-path: /var/log/nginx/access.log
client-body-timeout: '6000'
compute-full-forwarded-for: 'true'
enable-underscores-in-headers: 'true'
log-format-escape-json: 'true'
log-format-upstream: >-
{ "msec": "$msec", "connection": "$connection", "connection_requests":
"$connection_requests", "pid": "$pid", "request_id": "$request_id",
"request_length": "$request_length", "remote_addr": "$remote_addr",
"remote_user": "$remote_user", "remote_port": "$remote_port",
"http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
"$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
"request_uri": "$request_uri", "args": "$args", "status": "$status",
"body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
"http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
"http_host": "$http_host", "server_name": "$server_name", "request_time":
"$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
"$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
"upstream_response_time": "$upstream_response_time",
"upstream_response_length": "$upstream_response_length",
"upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
"$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
"request_method": "$request_method", "server_protocol": "$server_protocol",
"pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
"geoip_country_code": "$geoip_country_code" }
proxy-body-size: 5120m
proxy-read-timeout: '6000'
proxy-send-timeout: '6000'
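# Usage sketch (not part of this cluster.yml): with the rke_linux-amd64 binary downloaded
# earlier and this file saved as cluster.yml (assumed name), the cluster can be brought up
# and checked with:
#   rke up --config cluster.yml
#   export KUBECONFIG=$(pwd)/kube_config_cluster.yml
#   kubectl get nodes -o wide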

View File

@@ -0,0 +1,80 @@
#!/bin/bash
# vda 252:0 0 500G 0 disk
# ├─vda1 252:1 0 1G 0 part /boot
# └─vda2 252:2 0 119G 0 part
# ├─klas-root 253:0 0 74.7G 0 lvm /
# ├─klas-swap 253:1 0 7.9G 0 lvm
# └─klas-backup 253:2 0 36.4G 0 lvm
# The system's disks are laid out as shown above. Provide shell commands that:
# 1. Disable /dev/klas/swap and /dev/klas/backup and give all of vda2's space to /dev/klas/root
# 2. Create a /vda3 partition from the remaining space on vda and create PV /dev/vda3
# 3. Create VG datavg on PV /dev/vda3, using all of its space
# 4. Create LV docklv on VG datavg, using all of its space
# 5. Format LV docklv as xfs
# 6. Mount LV docklv at /var/lib/docker
# 7. Make the docklv mount permanent
# 1. Disable /dev/klas/swap and /dev/klas/backup and give all of vda2's space to /dev/klas/root
swapoff /dev/klas/swap
lvremove -y /dev/klas/swap
# lvremove /dev/klas/backup
lvextend -l +100%FREE /dev/klas/root
xfs_growfs /dev/klas/root
# 2. Create a new partition from the remaining disk space and create a PV on it (the data disk /dev/vdb below)
if ! lsblk | grep -q vdb; then
echo ""
echo "/dev/vdb does not exist, skipping partition creation"
echo ""
df -TH
echo ""
lsblk
echo ""
exit 0
fi
# Set the partition table of /dev/vdb to GPT
echo yes | parted /dev/vdb mklabel gpt
echo "/dev/vdb exists, starting partition creation"
echo "n
p
t
8e
w
" | fdisk /dev/vdb
partprobe
pvcreate /dev/vdb1
# 3. Create VG datavg on PV /dev/vdb1, using all of its space
vgcreate datavg /dev/vdb1
# 4. Create LV docklv on VG datavg, using all of its space
lvcreate -l 100%FREE -n docklv datavg
# 5. Format LV docklv as xfs
mkfs.xfs /dev/datavg/docklv
# 6. Mount LV docklv at /var/lib/docker
mkdir -p /var/lib/docker
mount /dev/datavg/docklv /var/lib/docker
# 7. Make the docklv mount permanent
echo "/dev/datavg/docklv /var/lib/docker xfs defaults 0 0" >> /etc/fstab
echo ""
df -TH
echo ""
lsblk
echo ""

View File

@@ -0,0 +1,194 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-1-9000g-pv
spec:
capacity:
storage: 9000Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: doris-static-storage
local:
path: /var/lib/docker/doris
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.16.100.56
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-2-9000g-pv
spec:
capacity:
storage: 9000Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: doris-static-storage
local:
path: /var/lib/docker/doris
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.16.100.57
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-3-9000g-pv
spec:
capacity:
storage: 9000Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: doris-static-storage
local:
path: /var/lib/docker/doris
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.16.100.58
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-1-500g-pv
spec:
capacity:
storage: 500Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: doris-static-storage
local:
path: /var/lib/docker/doris
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.16.100.56
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-2-500g-pv
spec:
capacity:
storage: 500Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: doris-static-storage
local:
path: /var/lib/docker/doris
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.16.100.57
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-3-500g-pv
spec:
capacity:
storage: 500Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: doris-static-storage
local:
path: /var/lib/docker/doris
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.16.100.58
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-1-9000g-pvc
spec:
storageClassName: doris-static-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 9000Gi
volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-2-9000g-pvc
spec:
storageClassName: doris-static-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 9000Gi
volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-3-9000g-pvc
spec:
storageClassName: doris-static-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 9000Gi
volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-1-500g-pvc
spec:
storageClassName: doris-static-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 500Gi
volumeMode: Filesystem
---

View File

@@ -0,0 +1,6 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: doris-static-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer

View File

@@ -0,0 +1,43 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: doris-static-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: Immediate
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-1-500g-pv
spec:
capacity:
storage: 500Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: doris-static-storage
local:
path: /var/lib/docker/doris
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.16.100.56
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-1-500g-pvc
spec:
storageClassName: doris-static-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 500Gi
volumeMode: Filesystem
---

View File

@@ -0,0 +1,84 @@
### Investigation Report
#### Introduction
This report looks at how to free storage space by deleting files without affecting MongoDB's actual data. Based on the file list provided by the user, it analyzes the purpose of each file and gives recommendations on what can be removed safely, while stressing the risks of manual operations and recommending safer alternatives.
#### File Analysis and Classification
The file list contains several kinds of files, such as `WiredTiger.wt`, `collection-*`, and `index-*`. These are the typical contents of a MongoDB data directory when the WiredTiger storage engine is in use. A detailed breakdown follows:
- **Actual data files**
  - `collection-*` files (e.g. `collection-0-1702428478038935225.wt`) store the documents of a collection. Deleting them causes data loss, so they must never be deleted.
  - Examples: `collection-0--1757014832397380742.wt`, `collection-4--3349146543354658687.wt`
- **Index files**
  - `index-*` files (e.g. `index-5-1702428478038935225.wt`) store a collection's indexes. Deleting them does not touch the actual data, but it removes the indexes and can noticeably degrade query performance, especially for complex queries.
  - Examples: `index-0--3349146543354658687.wt`, `index-6--3597060289595503483.wt`
  - Index files can make up a large share of the data directory, so removing them frees significant space, but the performance cost has to be weighed.
- **Metadata and core files**
  - `WiredTiger.wt` is the database's main metadata file; it stores the configuration and checkpoint information of every table. Deleting it leaves the database unable to start; never delete it.
  - `WiredTigerHS.wt` is the history store used for transaction integrity; deleting it can cause transaction-related problems.
  - `_mdb_catalog.wt` is the catalog file holding the database metadata; deleting it makes the database unusable.
  - `sizeStorer.wt` stores size information; deleting it can break space accounting and cause startup errors.
- **Lock files**
  - `WiredTiger.lock` and `mongod.lock` prevent multiple instances from accessing the database at the same time. Deleting them manually can lead to data corruption and is not recommended.
- **Journal and temporary files**
  - `journal` files are the write-ahead log that guarantees consistency during crash recovery. Deleting them is high risk and can cause data loss or inconsistency; do not remove them manually.
  - `_tmp` files are temporary files used during operations. If the database is not running and no operation is in progress, deleting them is usually safe.
  - `diagnostic.data` contains diagnostic information (server status in binary form) used for troubleshooting. Deleting it does not affect data, but it may make future problem diagnosis harder.
- **Unidentified files**
  - `storage.bson` is not a standard file in a MongoDB data directory; it may be a user-created or backup file. Do not delete it without confirming its purpose, to avoid affecting data.
#### Recommendations for Safe Deletion
Based on the analysis above, the following files can be considered for deletion without affecting the actual data:
- **`diagnostic.data`**: diagnostic information only, no actual data, so deleting it is safe. If space is tight and the diagnostics are not needed, remove it.
- **`_tmp` files**: temporary files; they can be deleted if the database is not running or no operation is confirmed to be in progress. Removing them during an active operation could interfere with it (a cleanup sketch follows below).
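A minimal cleanup sketch for these two items, assuming the data directory is `/var/lib/mongodb` (adjust to the actual `storage.dbPath`) and that `mongod` is stopped:

```bash
# Assumption: dbPath is /var/lib/mongodb and mongod is not running.
DBPATH=/var/lib/mongodb

# Back up the data directory before deleting anything.
tar -czf /root/mongo-dbpath-backup-$(date +%F).tar.gz -C "$DBPATH" .

# Remove only the diagnostic data and leftover temporary files.
rm -rf "$DBPATH/diagnostic.data"
rm -rf "$DBPATH/_tmp"

# Check how much space was reclaimed.
du -sh "$DBPATH"
```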
#### Trade-offs of Deleting Index Files
If more space is needed, deleting `index-*` files is an option, with the following caveats:
- Deleting an index file removes the corresponding index. The actual data (the documents) is unaffected, but query performance can drop significantly, especially for queries that depend on that index.
- Example files: `index-5--3349146543354658687.wt`, `index-8--3597060289595503483.wt`
- Recommendation: if certain indexes are genuinely no longer needed (for example in a development environment, or for queries that are no longer used), their files can be removed. In production, evaluate the performance impact first (see the sketch below for the supported way to drop an index).
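If particular indexes are truly unneeded, a safer route than deleting `index-*.wt` files by hand is to drop them through the server; a sketch with placeholder database, collection, and index names:

```bash
# Placeholder names (mydb, myCollection, myIndex_1); replace them with the real ones.
mongosh --quiet --eval 'db.getSiblingDB("mydb").myCollection.getIndexes()'
mongosh --quiet --eval 'db.getSiblingDB("mydb").myCollection.dropIndex("myIndex_1")'
```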
#### Files That Should Not Be Deleted
The following files should not be deleted, because they are essential for the database to run:
- `collection-*` files: contain the actual data; deleting them causes data loss.
- `WiredTiger.wt`, `WiredTigerHS.wt`, `sizeStorer.wt`: core metadata and history files; deleting them prevents the database from starting or causes inconsistency.
- `journal` files: used for crash recovery; deleting them can cause data loss.
- `WiredTiger.lock`, `mongod.lock`: lock files; deleting them risks conflicting access from multiple instances.
#### Alternative: Reclaiming Space with the `compact` Command
Deleting files by hand is risky. The recommended way to reclaim space safely is MongoDB's `compact` command, which rewrites data and indexes and releases unused disk space; it is particularly suited to the WiredTiger storage engine. Steps:
1. Connect to the MongoDB instance with `mongosh`.
2. Run the compact command for each collection, e.g. `db.runCommand({ compact: "myCollection" })`.
3. Note: `compact` needs additional disk space (roughly 2 GB) and blocks operations on the database, so run it during a maintenance window.
See the [MongoDB documentation](https://www.mongodb.com/docs/manual/tutorial/compact-collections/) for details; a short sketch follows below.
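A minimal sketch of running `compact` against every collection in one database, assuming `mongosh` can reach the instance and that `mydb` is a placeholder database name:

```bash
# Placeholder database name; run during a maintenance window.
mongosh --quiet --eval '
  const target = db.getSiblingDB("mydb");
  target.getCollectionNames().forEach(function (name) {
    printjson(target.runCommand({ compact: name }));
  });
'
```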
#### Risks and Caveats
- **Risk of manual deletion**: MongoDB's files are tightly coupled with each other; deleting the wrong file can corrupt the database or lose data. Back up all files before any manual operation.
- **Value of diagnostics**: `diagnostic.data` can be deleted, but future troubleshooting may then lack key information.
- **Performance impact**: deleting index files degrades query performance; assess the business requirements before removal.
#### Table: File Types and Deletion Recommendations
| File type | Example file | Contents | Can it be deleted? | Impact |
|-------------------|-----------------------------------|------------------------------|--------------------|--------------------------|
| Collection data file | `collection-0-1702428478038935225.wt` | Actual document data | No | Data loss |
| Index file | `index-5-1702428478038935225.wt` | Index data | Yes (with caution) | Degraded query performance |
| Metadata file | `WiredTiger.wt` | Database configuration and checkpoints | No | Database cannot start |
| History store file | `WiredTigerHS.wt` | Transaction history | No | Transaction integrity problems |
| Temporary files | `_tmp` | Temporary operation data | Yes (when the database is not running) | May affect in-progress operations |
| Diagnostics file | `diagnostic.data` | Server status information | Yes | Future diagnosis may be harder |
| Journal files | `journal` | Write-ahead log for crash recovery | No | Data inconsistency or loss |
| Lock file | `WiredTiger.lock` | Prevents multi-instance access | No | Risk of data corruption |
| Size storer file | `sizeStorer.wt` | Size accounting information | No | Startup may fail |
| Unidentified file | `storage.bson` | Unknown, possibly a custom file | No (unconfirmed) | Could affect data |
#### Conclusion
To free space, start by deleting `diagnostic.data` and `_tmp`; these operations are relatively safe and do not affect the actual data. If more space is needed, deleting `index-*` files is an option, but weigh the performance impact. Avoid manually deleting the `journal` files and other core files, and prefer the `compact` command to reclaim space safely.

View File

@@ -0,0 +1,29 @@
Explanation of why one-click deployment cannot be achieved for 中移凌云 (China Mobile Lingyun)
1. Inherent system reasons:
1.1 The system is complex, with a large number of microservices; the configuration and dependencies between services are intricate and cannot be handled by a script alone.
1.2 The infrastructure is complex (multiple hosts, k8s cluster deployment, etc.) and the deployment chain is long; failures can occur anywhere unexpected, for example in the customer's server environment or network conditions.
1.3 Orchestrating workflows across hosts makes one-click deployment extremely difficult.
2. External reasons:
2.1 The customer's on-premises environment is entirely outside our control.
2.1.1 Even on the same operating system, different versions require different deployment dependencies.
2.1.2 Differences in the customer's underlying network architecture can cause unavoidable failures in the k8s cross-host network plugin that must be investigated manually.
2.1.3 Even with adaptations for different operating systems and versions, unexpected failures still occur in practice and require manual intervention.
3. Comparison with competitors:
3.1 DJI's comparable products make no claim of one-click deployment either.
3.2 We are not aware of any precedent for one-click deployment of a microservice architecture.
4. Work-focus reasons:
4.1 The low-altitude economy center's main responsibility is building an excellent product; it cannot devote enormous amounts of time to optimizing the delivery process.
4.2 We hope the delivery team in technical support recognizes how important this work is and explores building a fast "one-click" delivery and deployment process on its own.
5. Optimization work already done on the deployment process:
5.1 For the fixed steps of the deployment process, we have already made parts of it one-click.
5.2 Offline Docker installation: masks operating-system differences and supports deployment on domestic (国产化) environments.
5.3 k8s cluster installation: the cluster can be started with a single command.
5.4 Harbor installation: one-click installation and startup of Harbor.
5.5 MySQL, Redis, Mongo, EMQX, RabbitMQ: the middleware can be brought up with a single command.

View File

@@ -4,7 +4,7 @@
rm -f /usr/local/bin/agent-wdd
rm -f /usr/local/bin/test-shell.sh
-wget https://pan.107421.xyz/d/oracle-seoul-2/agent-wdd_linux_amd64 -qO /usr/local/bin/agent-wdd
+wget https://pan.107421.xyz/d/oracle-seoul-2/agent-wdd_linux_amd64 -O /usr/local/bin/agent-wdd
chmod +x /usr/local/bin/agent-wdd
@@ -17,9 +17,9 @@ export oss_url_prefix=https://oss.demo.uavcmlc.com/cmlc-installation/downloadfil
# export oss_url_prefix=http://42.192.52.227:9000/octopus
wget ${oss_url_prefix}/docker-amd64-20.10.15.tgz
-wget ${oss_url_prefix}/docker-compose-linux-x86_64-v2.18.0
+wget ${oss_url_prefix}/docker-compose-v2.18.0-linux-amd64
-wget ${oss_url_prefix/harbor-offline-installer-v2.9.0.tgz
+wget ${oss_url_prefix}/harbor-offline-installer-v2.9.0.tgz
wget ${oss_url_prefix}/rke_linux-amd64
wget ${oss_url_prefix}/kubectl-1.20.4-amd64

View File

@@ -15,6 +15,9 @@ cat /usr/local/etc/wdd/agent-wdd-config.yaml
/usr/local/bin/agent-wdd base selinux
/usr/local/bin/agent-wdd base sysconfig
/usr/local/bin/agent-wdd zsh
# Download all the dependencies first!
/usr/local/bin/agent-wdd base docker local
/usr/local/bin/agent-wdd base dockercompose local
@@ -22,15 +25,26 @@ cat /usr/local/etc/wdd/agent-wdd-config.yaml
# Run on the master node only
/usr/local/bin/agent-wdd base docker config
/usr/local/bin/agent-wdd base harbor install
# Run commands in batches
host_list=(
-172.16.100.50
+172.16.100.56
172.16.100.57
172.16.100.58
172.16.100.61
)
#
host_list=(
172.16.100.62
172.16.100.51
172.16.100.52
172.16.100.53
172.16.100.54
172.16.100.55
172.16.100.56
172.16.100.57
172.16.100.58
@@ -40,20 +54,47 @@ host_list=(
)
for server in "${host_list[@]}";do
echo "current ip is $server"
-# ssh root@${server} "echo yes"
+ssh root@${server} "curl -s http://172.24.65.135"
echo ""
scp /usr/local/bin/agent-wdd root@${server}:/usr/local/bin/agent-wdd
ssh root@${server} "/usr/local/bin/agent-wdd base ssh config"
ssh root@${server} "/usr/local/bin/agent-wdd base ssh key"
done
# Copy and sync files
export server=172.16.100.62
scp /usr/local/bin/agent-wdd root@${server}:/usr/local/bin/agent-wdd
ssh root@${server} "/usr/local/bin/agent-wdd base ssh config"
ssh root@${server} "/usr/local/bin/agent-wdd base ssh key"
# Disk initialization
ssh root@${server} "mkdir /root/wdd"
-scp /root/wdd/docker-amd64-20.10.15.tgz root@${server}:/root/wdd/docker-amd64-20.10.15.tgz
+scp /root/wdd/disk.sh root@${server}:/root/wdd/
-scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${server}:/root/wdd/
+ssh root@${server} "bash /root/wdd/disk.sh"
# Copy files - docker
scp /root/wdd/docker-amd64-20.10.15.tgz root@${server}:/root/wdd/docker-amd64-20.10.15.tgz
scp /root/wdd/docker-compose-v2.18.0-linux-amd64 root@${server}:/root/wdd/
# Run the agent-wdd commands in batch
ssh root@${server} "/usr/local/bin/agent-wdd info all"
ssh root@${server} "cat /usr/local/etc/wdd/agent-wdd-config.yaml"
# ssh root@${server} "/usr/local/bin/agent-wdd base tools"
ssh root@${server} "/usr/local/bin/agent-wdd base swap"
ssh root@${server} "/usr/local/bin/agent-wdd base firewall"
ssh root@${server} "/usr/local/bin/agent-wdd base selinux"
ssh root@${server} "/usr/local/bin/agent-wdd base sysconfig"
ssh root@${server} "/usr/local/bin/agent-wdd base docker local"
ssh root@${server} "/usr/local/bin/agent-wdd base dockercompose local"
# Run on the master node only
/usr/local/bin/agent-wdd base docker config
# Push the docker configuration to the node
scp /etc/docker/daemon.json root@${server}:/etc/docker/daemon.json
ssh root@${server} "cat /etc/docker/daemon.json"
ssh root@${server} "systemctl restart docker"
ssh root@${server} "docker info"
wget https://oss.demo.uavcmlc.com/cmlc-installation/tmp/nginx=1.27.0=2025-03-11=402.tar.gz && docker load < nginx=1.27.0=2025-03-11=402.tar.gz && docker run -it --rm harbor.cdcyy.com.cn/cmii/nginx:1.27.0

View File

@@ -1,6 +1,6 @@
-export harbor_host=192.168.35.71:8033
+export harbor_host=172.16.100.55:8033
curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"cmii","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$harbor_host/api/v2.0/projects

View File

@@ -8,7 +8,7 @@ env:
value: "eth0" value: "eth0"
# 更加保险 # 更加保险
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=ens160 kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=ens18
# 删除所有的calico pod # 删除所有的calico pod

View File

@@ -1,6 +1,6 @@
#!/bin/bash
-namespace=jxyd
+namespace=gsyd-app
# Gracefully handle Deployment scale-down
scale_deployments() {

View File

@@ -1,6 +1,6 @@
#!/bin/bash
-namespace=jlyd
+namespace=gsyd-app
install_yq() {
wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/yq_linux_amd64 -O /usr/local/bin/yq
@@ -44,9 +44,9 @@ backup_all_stateful_sets() {
}
install_yq
-# backup_all_deployment
+backup_all_deployment
-# backup_all_service
+backup_all_service
-# backup_all_stateful_sets
+backup_all_stateful_sets
backup_all_configmap
# https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64

View File

@@ -5,7 +5,7 @@ wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/jq-linux-am
chmod +x /usr/local/bin/jq
-export name_space=jxyd
+export name_space=ingress-nginx
kubectl delete pods -n $name_space --field-selector status.phase!=Running --force
@@ -13,4 +13,6 @@ kubectl get pods -n $name_space -o json | jq -r '.items[] | select(.status.conta
kubectl -n ${name_space} delete pod helm-nacos-0 --force
kubectl -n ${name_space} logs helm-nacos-0

View File

@@ -1,7 +1,7 @@
#!/bin/bash
-harbor_host=10.20.1.130:8033
+harbor_host=172.16.0.31:8033
-namespace=jxyd
+namespace=shbj
app_name=""
new_tag=""

View File

@@ -5,7 +5,7 @@ gzip_image_list_txt="all-gzip-image-list.txt" # usually no need to change
oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
local_gzip_path="/root/octopus-image"
-DockerRegisterDomain="192.168.10.3:8033" # change to match the actual environment
+DockerRegisterDomain="172.16.100.55:8033" # change to match the actual environment
HarborAdminPass=V2ryStr@ngPss # must match the password set in the first script
print_green() {
@@ -163,6 +163,6 @@ test(){
}
# test
-#Download_Load_Tag_Upload "rke"
+Download_Load_Tag_Upload "rke"
-Load_Tag_Upload "cmii"
+# Load_Tag_Upload "cmii"

View File

@@ -1,14 +1,15 @@
version: '2'
services:
-  minio1:
+  minio:
+    image: minio/minio:RELEASE.2023-06-02T23-17-26Z
+    container_name: live-minio
    ports:
      - "9000:9000"
      - "9001:9001"
-    image: 'A1C2IP:8033/cmii/minio:2022.5.4'
+    command: 'server /data --console-address ":9001"'
+    environment:
+      - MINIO_ROOT_USER=cmii
+      - MINIO_ROOT_PASSWORD=B#923fC7mk
    restart: always
-    environment:
-      MINIO_ACCESS_KEY: cmii
-      MINIO_SECRET_KEY: B#923fC7mk
    volumes:
-      - /data/minio-pv/pv1:/data
+      - /var/lib/docker/miniodata/minio-pv:/data
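# Usage sketch (not part of the compose file): assuming the new version is saved as
# docker-compose.yml, start the service and follow its logs with:
#   docker compose up -d
#   docker logs -f live-minio
# The MinIO console is then reachable on port 9001 with the credentials defined above.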