完成 72绵阳项目 71雄安集团监管平台 大量优化更新

This commit is contained in:
zeaslity
2026-02-03 17:07:28 +08:00
parent d962ace967
commit a8f6bda703
93 changed files with 21632 additions and 185 deletions

View File

@@ -15,7 +15,7 @@ cat /usr/local/etc/wdd/agent-wdd-config.yaml
/usr/local/bin/agent-wdd base selinux
/usr/local/bin/agent-wdd base sysconfig
/usr/local/bin/agent-wdd zsh
/usr/local/bin/agent-wdd zsh cn
# 首先需要下载所有的依赖!
@@ -56,8 +56,7 @@ done
export server=172.16.100.62
scp /usr/local/bin/agent-wdd root@${server}:/usr/local/bin/agent-wdd
ssh root@${server} "/usr/local/bin/agent-wdd base ssh config"
ssh root@${server} "/usr/local/bin/agent-wdd base ssh key"
ssh root@${server} "/usr/local/bin/agent-wdd base ssh config && /usr/local/bin/agent-wdd base ssh key"
# 安装docker-compose
@@ -66,12 +65,35 @@ chmod +x /usr/local/bin/docker-compose
# ssh root@${server} "/usr/local/bin/agent-wdd base tools"
# APT代理加速
scp /root/wdd/apt-change.sh root@${server}:/root/wdd/apt-change.sh
ssh root@${server} "bash /root/wdd/apt-change.sh -y"
ssh root@${server} "echo \"\"> /etc/apt/apt.conf.d/01proxy"
ssh root@${server} "printf '%s\n' \
'Acquire::http::Proxy \"http://10.22.57.8:3142\";' \
'Acquire::https::Proxy \"http://10.22.57.8:3142\";' \
| tee /etc/apt/apt.conf.d/01proxy >/dev/null"
ssh root@${server} "apt-get update"
ssh root@${server} "apt-get install -y parted"
# 磁盘初始化
ssh root@${server} "mkdir /root/wdd"
scp /root/wdd/disk.sh root@${server}:/root/wdd/
ssh root@${server} "bash /root/wdd/disk.sh"
# master节点安装docker
bash /root/wdd/docker.sh
# 在线安装docker 通过APT代理
scp /etc/apt/keyrings/docker.gpg root@${server}:/root/wdd/
scp /root/wdd/docker.sh root@${server}:/root/wdd/
ssh root@${server} "bash /root/wdd/docker.sh"
ssh root@${server} "docker info"
ssh root@${server} "docker compose version"
# 复制文件-docker
scp /root/wdd/docker-amd64-20.10.15.tgz root@${server}:/root/wdd/docker-amd64-20.10.15.tgz
scp /root/wdd/docker-compose-v2.18.0-linux-amd64 root@${server}:/root/wdd/
@@ -81,7 +103,6 @@ ssh root@${server} "/usr/local/bin/agent-wdd info all"
ssh root@${server} "cat /usr/local/etc/wdd/agent-wdd-config.yaml"
ssh root@${server} "/usr/local/bin/agent-wdd base swap"
ssh root@${server} "/usr/local/bin/agent-wdd base firewall"
ssh root@${server} "/usr/local/bin/agent-wdd base selinux"
@@ -101,19 +122,34 @@ ssh root@${server} "docker info"
wget https://oss.demo.uavcmlc.com/cmlc-installation/tmp/nginx=1.27.0=2025-03-11=402.tar.gz && docker load < nginx=1.27.0=2025-03-11=402.tar.gz && docker run -it --rm harbor.cdcyy.com.cn/cmii/nginx:1.27.0
ssh root@${server} "rm /root/wdd/*.sh"
# 主节点执行 安装harbor仓库
/usr/local/bin/agent-wdd base harbor install
# 安装rke kubectl
mv /root/wdd/rke_amd64 /usr/local/bin/rke
mv /root/wdd/rke_linux-amd64 /usr/local/bin/rke
chmod +x /usr/local/bin/rke
mv /root/wdd/kubectl /usr/local/bin/kubectl
mv /root/wdd/kubectl_v1.30.14_amd64 /usr/local/bin/kubectl
chmod +x /usr/local/bin/kubectl
# 安装 k8s-证书
mkdir /root/.kube
cp ./kube_config_cluster.yml /root/.kube/config
curl -s https://172.29.137.125
# 环境测试
DEFAULT_HTTP_BACKEND_IP=$(kubectl -n ingress-nginx get svc default-http-backend -o jsonpath='{.spec.clusterIP}')
# master节点
curl -s "http://${DEFAULT_HTTP_BACKEND_IP}"x
# worker节点
ssh root@"$server" "DEFAULT_HTTP_BACKEND_IP='$DEFAULT_HTTP_BACKEND_IP' bash -s" <<'EOF'
echo "DEFAULT_HTTP_BACKEND_IP=$DEFAULT_HTTP_BACKEND_IP"
curl -s "http://${DEFAULT_HTTP_BACKEND_IP}"
echo
EOF

View File

@@ -1,112 +0,0 @@
#!/bin/bash
# Install a pinned Docker release on Ubuntu using the Aliyun mirror.
set -eo pipefail
# Script parameters
DOCKER_VERSION="20.10" # desired version: "major.minor" (newest patch) or exact "x.y.z"
UBUNTU_IDS=("18.04" "20.04" "22.04" "24.04") # supported Ubuntu releases
ALIYUN_MIRROR="https://mirrors.aliyun.com" # mirror base URL for APT and the Docker repo
DOCKER_COMPOSE_VERSION="2.26.1" # standalone docker-compose binary to download
# 1. Environment check: abort unless the host is a supported Ubuntu release.
check_ubuntu() {
  local release_id=""
  if command -v lsb_release &> /dev/null; then
    release_id="$(lsb_release -is)"
  fi
  if [[ "$release_id" != "Ubuntu" ]]; then
    echo "错误本脚本仅支持Ubuntu系统"
    exit 1
  fi
  local version_id
  version_id="$(lsb_release -rs)"
  # Membership check against the supported-release list.
  case " ${UBUNTU_IDS[*]} " in
    *" ${version_id} "*) ;;
    *)
      echo "错误不支持的Ubuntu版本 ${version_id},支持版本:${UBUNTU_IDS[*]}"
      exit 1
      ;;
  esac
}
# 2. Rewrite /etc/apt/sources.list to use the Aliyun mirror, then refresh APT.
set_aliyun_mirror() {
  local host
  for host in archive.ubuntu.com security.ubuntu.com; do
    sudo sed -i "s/${host}/mirrors.aliyun.com/g" /etc/apt/sources.list
  done
  sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates
}
# 3. Install the Docker GPG key from the mirror and register the Docker APT repo.
prepare_docker_env() {
  sudo mkdir -p /etc/apt/keyrings
  curl -fsSL $ALIYUN_MIRROR/docker-ce/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
  local codename arch repo_line
  codename=$(lsb_release -cs)
  arch=$(dpkg --print-architecture)
  repo_line="deb [arch=${arch} signed-by=/etc/apt/keyrings/docker.gpg] $ALIYUN_MIRROR/docker-ce/linux/ubuntu $codename stable"
  printf '%s\n' "$repo_line" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
  sudo apt-get update
}
# 4. Version resolution.
# Resolve DOCKER_VERSION ("x.y" -> newest patch, or exact "x.y.z") to a full
# apt version string with the epoch prefix stripped; exits 1 if nothing matches.
get_docker_version() {
  local target_version=""
  if [[ $DOCKER_VERSION =~ ^[0-9]+\.[0-9]+$ ]]; then
    # Pick the highest patch release under the requested major.minor.
    # NOTE: "([.-]|~)" replaces the GNU-only "\~\w+" with portable ERE.
    target_version=$(apt-cache madison docker-ce \
      | awk -F'|' '{gsub(/ /,"",$2); print $2}' \
      | grep -E "^[0-9]+:${DOCKER_VERSION}([.-]|~)" \
      | sort -rV \
      | head -1)
  elif [[ $DOCKER_VERSION =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    # Exact match; sort + head so at most one line is emitted (the old code
    # could print several madison rows here and break apt-get install).
    target_version=$(apt-cache madison docker-ce \
      | awk -F'|' '{gsub(/ /,"",$2); print $2}' \
      | grep -E "^[0-9]+:${DOCKER_VERSION}.*$(lsb_release -cs)" \
      | sort -rV \
      | head -1)
  fi
  [ -z "$target_version" ] && echo "错误找不到Docker版本 $DOCKER_VERSION" && exit 1
  # Strip the epoch prefix. BUGFIX: plain sed treats "+" literally in BRE, so
  # 's/^[0-9]+://' never matched; -E enables the intended ERE quantifier.
  echo "$target_version" | sed -E 's/^[0-9]+://'
}
# 5. Main flow: environment check, mirror setup, repo prep, version pick, install.
main() {
  check_ubuntu
  echo "-- 设置阿里云源 --"
  set_aliyun_mirror
  echo "-- 准备Docker仓库 --"
  prepare_docker_env
  echo "-- 解析Docker版本 --"
  # BUGFIX: declare and assign separately — `local v=$(cmd)` always returns 0,
  # so a failed version lookup was silently ignored under set -e (SC2155).
  local full_version
  full_version=$(get_docker_version)
  echo "选择版本:$full_version"
  echo "-- 安装组件 --"
  sudo apt-get install -y \
    docker-ce-cli="$full_version" \
    docker-ce="$full_version" \
    docker-ce-rootless-extras="$full_version" \
    containerd.io \
    docker-buildx-plugin \
    docker-compose-plugin
  echo "-- 安装docker-compose --"
  # $(…) instead of legacy backticks.
  sudo curl -sSL "https://get.daocloud.io/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
  sudo chmod +x /usr/local/bin/docker-compose
  echo "-- 禁用自动更新 --"
  sudo apt-mark hold docker-ce docker-ce-cli containerd.io
  echo "-- 启动服务 --"
  sudo systemctl enable docker && sudo systemctl start docker
  echo -e "\n=== 安装完成 ==="
  docker --version
  docker-compose --version
}
main
请写一个shell基于上述的部分安装逻辑实现如下的功能
脚本前面提取变量 docker的版本号 20.10.15 或 20.10(安装小版本最高的版本)
1. 检测当前主机是否是ubuntu环境本脚本只支持Ubuntu
2. 获取本机的版本号支持ubuntu18.04 20.04 22.04 24.04的版本
3. 根据ubuntu版本修改apt的镜像源为阿里源
4. 在线安装符合变量版本的docker在线安装docker-compose安装常用的插件
5. 禁止docker自动更新

View File

@@ -0,0 +1,84 @@
#!/bin/bash
# Initialize a data disk as GPT + LVM (datavg/lvdata) and mount it.
set -e
# User-configurable section
DISK="/dev/sdb" # physical disk to initialize (adjust for your host — WIPED!)
MOUNT_PATH="/var/lib/docker" # mount point; directory is created automatically
FS_TYPE="ext4" # filesystem type: ext4 or xfs (default ext4)
#----------------------------------------------------------
# Core logic (avoid modifying unless necessary)
#----------------------------------------------------------
# Validate root privilege, disk presence, and the requested filesystem type.
function check_prerequisites() {
  if [[ $EUID -ne 0 ]]; then
    echo -e "\033[31m错误必须使用root权限运行此脚本\033[0m"
    exit 1
  fi
  if [[ ! -b "$DISK" ]]; then
    echo -e "\033[31m错误磁盘 $DISK 不存在\033[0m"
    exit 1
  fi
  case "$FS_TYPE" in
    ext4|xfs) ;;
    *)
      echo -e "\033[31m错误不支持的磁盘格式 $FS_TYPE,仅支持 ext4/xfs\033[0m"
      exit 1
      ;;
  esac
}
# Re-label $DISK as GPT, create one full-size LVM partition, and build the
# datavg volume group with a single lvdata LV spanning all free space.
# WARNING: destructive — the existing partition table on $DISK is wiped.
# NOTE(review): "${DISK}1" assumes /dev/sdX naming; NVMe disks would need
# "${DISK}p1" — confirm before running on nvme devices.
function prepare_disk() {
local partition="${DISK}1"
echo -e "\033[34m正在初始化磁盘分区...\033[0m"
parted "$DISK" --script mklabel gpt
parted "$DISK" --script mkpart primary 0% 100%
parted "$DISK" --script set 1 lvm on
partprobe "$DISK" # make the kernel re-read the new partition table
echo -e "\033[34m正在创建LVM结构...\033[0m"
pvcreate "$partition"
vgcreate datavg "$partition"
lvcreate -y -l 100%FREE -n lvdata datavg
}
# Format /dev/datavg/lvdata as $FS_TYPE, register it in /etc/fstab by UUID,
# and mount everything. Idempotent: re-runs no longer duplicate fstab entries.
function format_and_mount() {
echo -e "\033[34m格式化逻辑卷...\033[0m"
if [[ "$FS_TYPE" == "ext4" ]]; then
mkfs.ext4 -F "/dev/datavg/lvdata"
else
mkfs.xfs -f "/dev/datavg/lvdata"
fi
echo -e "\033[34m设置挂载配置...\033[0m"
mkdir -p "$MOUNT_PATH"
local uuid
uuid=$(blkid -s UUID -o value "/dev/datavg/lvdata")
# BUGFIX: tee -a appended a duplicate line on every re-run; only add the
# entry when this UUID is not already present in fstab.
if ! grep -q "UUID=$uuid " /etc/fstab; then
echo "UUID=$uuid $MOUNT_PATH $FS_TYPE defaults 0 0" | tee -a /etc/fstab >/dev/null
fi
mount -a
}
# Print the final partition layout and mount usage for manual verification.
function verify_result() {
  printf '\n\033[1;36m%s\033[0m\n' "最终验证结果"
  lsblk -f "$DISK"
  printf '\n%s\n' "磁盘空间使用情况"
  df -hT "$MOUNT_PATH"
}
# Main execution flow: validate, partition + LVM, format/mount, then report.
check_prerequisites
prepare_disk
format_and_mount
verify_result
echo -e "\n\033[32m操作执行完毕请仔细核查上述输出信息\033[0m"
#请写一个shell脚本脚本前面有变量可以设置 物理磁盘名称 挂载点路径 磁盘格式化的形式,脚本实现如下的功能
#1.将物理磁盘的盘符修改为gpt格式
#2.将物理磁盘全部空间创建一个分区分区格式为lvm
#3.将分区分配给逻辑卷datavg
#4.将datavg所有可用的空间分配给逻辑卷lvdata
#5.将逻辑卷格式化为变量磁盘格式化的形式(支持xfs和ext4的格式,默认为ext4)
#6.创建变量挂载点路径
#7.写入/etc/fstab,将逻辑卷挂载到变量挂载点,执行全部挂载操作
#8.执行lsblk和df -TH查看分区是否正确挂载

View File

@@ -0,0 +1,594 @@
#!/usr/bin/env bash
# ==============================================================================
# Metadata
# ==============================================================================
# Author : Smith Wang (Refactor by ChatGPT)
# Version : 2.0.0
# License : MIT
# Description : Configure Docker APT repository (mirror) and install Docker on
# Ubuntu (18.04/20.04/22.04/24.04) with robust offline handling.
#
# Modules :
# - Logging & Error Handling
# - Environment & Dependency Checks
# - Public Network Reachability Detection
# - Docker GPG Key Installation (Online/Offline)
# - Docker APT Repo Configuration
# - Docker Installation & Service Setup
#
# Notes :
# - This script DOES NOT modify Ubuntu APT sources (/etc/apt/sources.list)
# - This script DOES NOT set APT proxy (assumed handled elsewhere)
# - If public network is NOT reachable and local GPG key is missing, script
# will NOT proceed (per your requirement).
#
# ShellCheck : Intended clean for bash v5+ with: shellcheck -x <script>
# ==============================================================================
set -euo pipefail
# ==============================================================================
# Global Constants
# ==============================================================================
readonly SCRIPT_NAME="$(basename "$0")"
readonly SCRIPT_VERSION="2.0.0"
# Default mirror for Docker repo (you asked: only focus on docker source)
readonly DEFAULT_DOCKER_APT_MIRROR="https://mirrors.aliyun.com/docker-ce/linux/ubuntu"
# Default keyring location (recommended by modern Ubuntu)
readonly DEFAULT_KEYRING_PATH="/etc/apt/keyrings/docker.gpg"
# Exit codes
readonly EC_OK=0
readonly EC_GENERAL=1
readonly EC_UNSUPPORTED_OS=10
readonly EC_DEPENDENCY=11
readonly EC_OFFLINE_NO_KEY=20
readonly EC_APT_FAILURE=30
# ==============================================================================
# Configurable Variables (Environment Overrides)
# ==============================================================================
# You may export these before running:
# DOCKER_VERSION="20.10" # or "20.10.15" (optional)
# DOCKER_APT_MIRROR="https://..."
# DOCKER_KEYRING_PATH="/root/wdd/docker.gpg"
# LOCAL_DOCKER_GPG="/path/to/docker.gpg" (optional)
# LOG_LEVEL="DEBUG|INFO|WARN|ERROR"
DOCKER_VERSION="${DOCKER_VERSION:-20.10}"
DOCKER_APT_MIRROR="${DOCKER_APT_MIRROR:-$DEFAULT_DOCKER_APT_MIRROR}"
DOCKER_KEYRING_PATH="${DOCKER_KEYRING_PATH:-$DEFAULT_KEYRING_PATH}"
LOCAL_DOCKER_GPG="${LOCAL_DOCKER_GPG:-/root/wdd/docker.gpg}"
LOG_LEVEL="${LOG_LEVEL:-INFO}"
# ==============================================================================
# Function Call Graph (ASCII)
# ==============================================================================
# main
# |
# +--> init_traps
# |
# +--> check_platform
# |
# +--> ensure_prerequisites
# |
# +--> detect_public_network
# | |
# | +--> can_fetch_url_head
# |
# +--> ensure_docker_gpg_key
# | |
# | +--> install_key_from_online
# | | |
# | | +--> require_cmd (curl, gpg)
# | |
# | +--> install_key_from_local
# |
# +--> configure_docker_repo
# |
# +--> install_docker_packages
# | |
# | +--> resolve_docker_version
# |
# +--> pin_docker_packages
# |
# +--> enable_docker_service
# ==============================================================================
# ==============================================================================
# Logging
# ==============================================================================
### Map log level string to numeric value.
### @param level_str string Level string (DEBUG/INFO/WARN/ERROR)
### @return 0 Always returns 0; outputs numeric level to stdout
### @require none
log_level_to_num() {
  local lvl="${1:-INFO}"
  # Unknown strings fall back to INFO's weight.
  if [ "$lvl" = "DEBUG" ]; then
    echo 10
  elif [ "$lvl" = "WARN" ]; then
    echo 30
  elif [ "$lvl" = "ERROR" ]; then
    echo 40
  else
    echo 20
  fi
}
### Unified logger with level gating.
### @param level string Log level
### @param message string Message
### @return 0 Always returns 0
### @require date
log() {
  local level="${1:?level required}"
  shift
  local msg="${*:-}"
  # Gate first: drop messages below the configured threshold.
  local threshold wanted
  threshold="$(log_level_to_num "$LOG_LEVEL")"
  wanted="$(log_level_to_num "$level")"
  if [ "$wanted" -lt "$threshold" ]; then
    return 0
  fi
  local ts
  ts="$(date '+%F %T')"
  # > Keep format stable for parsing by log collectors
  printf '%s [%s] %s: %s\n' "$ts" "$level" "$SCRIPT_NAME" "$msg" >&2
}
# ==============================================================================
# Error Handling & Traps
# ==============================================================================
### Trap handler for unexpected errors.
### @param exit_code int Exit code from failing command
### @return 0 Always returns 0
### @require none
on_error() {
  local rc="${1:-$EC_GENERAL}"
  log ERROR "Unhandled error occurred (exit_code=${rc})."
  exit "$rc"
}
### Trap handler for script exit.
### @param exit_code int Exit code
### @return 0 Always returns 0
### @require none
on_exit() {
  local code="${1:-$EC_OK}"
  case "$code" in
    0) log INFO "Done." ;;
    *) log WARN "Exited with code ${code}." ;;
  esac
  return 0
}
### Initialize traps (ERR/INT/TERM/EXIT).
### @return 0 Success
### @require none
init_traps() {
  # Signal handlers first, then the generic error/exit hooks; registration
  # order does not affect behavior.
  trap 'log WARN "Interrupted (SIGINT)"; exit 130' INT
  trap 'log WARN "Terminated (SIGTERM)"; exit 143' TERM
  trap 'on_error $?' ERR
  trap 'on_exit $?' EXIT
}
# ==============================================================================
# Privilege Helpers
# ==============================================================================
### Run a command as root (uses sudo if not root).
### @param cmd string Command to run
### @return 0 Success; non-zero on failure
### @require sudo (if not root)
run_root() {
  # Escalate through sudo only when we are not already uid 0.
  if [ "$(id -u)" -ne 0 ]; then
    sudo "$@"
  else
    "$@"
  fi
}
# ==============================================================================
# Dependency Checks
# ==============================================================================
### Ensure a command exists in PATH.
### @param cmd_name string Command name
### @return 0 If exists; 1 otherwise
### @require none
require_cmd() {
  local name="${1:?cmd required}"
  if command -v "$name" >/dev/null 2>&1; then
    return 0
  fi
  log ERROR "Missing dependency: ${name}"
  return 1
}
# ==============================================================================
# Platform Check
# ==============================================================================
### Check OS is Ubuntu and supported versions.
### @return 0 Supported; exits otherwise
### @require lsb_release
check_platform() {
  require_cmd lsb_release || exit "$EC_DEPENDENCY"
  local distro version
  distro="$(lsb_release -is 2>/dev/null || true)"
  version="$(lsb_release -rs 2>/dev/null || true)"
  # Anything that is not Ubuntu is rejected outright.
  if [ "$distro" != "Ubuntu" ]; then
    log ERROR "Unsupported OS: ${distro}. This script supports Ubuntu only."
    exit "$EC_UNSUPPORTED_OS"
  fi
  # Allow-list of LTS releases this installer is validated against.
  local supported=" 18.04 20.04 22.04 24.04 "
  if [[ "$supported" != *" ${version} "* ]]; then
    log ERROR "Unsupported Ubuntu version: ${version}. Supported: 18.04/20.04/22.04/24.04"
    exit "$EC_UNSUPPORTED_OS"
  fi
  log INFO "Platform OK: ${distro} ${version}"
}
# ==============================================================================
# APT Prerequisites
# ==============================================================================
### Install required packages for repository/key management and Docker installation.
### @return 0 Success; exits on apt failures
### @require apt-get
ensure_prerequisites() {
  require_cmd apt-get || exit "$EC_DEPENDENCY"
  log INFO "Installing prerequisites (does NOT modify APT sources or proxy)..."
  # > apt update must work via your existing proxy+mirror scripts
  run_root apt-get update || {
    log ERROR "apt-get update failed. Check APT proxy / mirror configuration."
    exit "$EC_APT_FAILURE"
  }
  # > Keep dependencies minimal; curl/gpg used only for online key fetch.
  run_root apt-get install -y ca-certificates gnupg lsb-release || {
    log ERROR "Failed to install prerequisites."
    exit "$EC_APT_FAILURE"
  }
  log INFO "Prerequisites installed."
}
# ==============================================================================
# Public Network Reachability
# ==============================================================================
### Check whether we can fetch HTTP headers from a URL (lightweight reachability).
### @param test_url string URL to test
### @return 0 Reachable; 1 otherwise
### @require curl (optional; if missing returns 1)
can_fetch_url_head() {
  local url="${1:?url required}"
  if ! command -v curl >/dev/null 2>&1; then
    log WARN "curl not found; cannot test public network reachability via HTTP."
    return 1
  fi
  # > Use short timeout to avoid hanging in restricted networks
  curl -fsSI --max-time 3 "$url" >/dev/null 2>&1
}
### Detect whether public network access is available for Docker key fetch.
### @return 0 Online; 1 Offline/Uncertain
### @require none
detect_public_network() {
  local probe_url="${DOCKER_APT_MIRROR%/}/gpg"
  log INFO "Detecting public network reachability: HEAD ${probe_url}"
  if ! can_fetch_url_head "$probe_url"; then
    log WARN "Public network NOT reachable (or curl missing). Will try local GPG key."
    return 1
  fi
  log INFO "Public network reachable for Docker mirror."
  return 0
}
# ==============================================================================
# Docker GPG Key Management
# ==============================================================================
### Install Docker GPG key from online source (mirror).
### @param gpg_url string GPG URL
### @param keyring_path string Keyring output path
### @return 0 Success; non-zero on failure
### @require curl, gpg, install, mkdir, chmod
install_key_from_online() {
  local gpg_url="${1:?gpg_url required}"
  local keyring_path="${2:?keyring_path required}"
  require_cmd curl || return 1
  require_cmd gpg || return 1
  # > Write to temp then atomically install to avoid partial files
  local tmp_dir tmp_gpg
  tmp_dir="$(mktemp -d)"
  tmp_gpg="${tmp_dir}/docker.gpg"
  log INFO "Fetching Docker GPG key online: ${gpg_url}"
  # BUGFIX: the bare `curl | gpg` pipeline aborted the whole script via the
  # ERR trap on failure (defeating the caller's local-key fallback) and leaked
  # tmp_dir. Fail softly with return 1 and always clean up.
  if ! curl -fsSL --max-time 10 "$gpg_url" | gpg --dearmor -o "$tmp_gpg"; then
    log ERROR "Failed to fetch or dearmor Docker GPG key from ${gpg_url}"
    rm -rf "$tmp_dir"
    return 1
  fi
  run_root mkdir -p "$(dirname "$keyring_path")"
  run_root install -m 0644 "$tmp_gpg" "$keyring_path"
  run_root chmod a+r "$keyring_path" || true
  rm -rf "$tmp_dir"
  log INFO "Docker GPG key installed: ${keyring_path}"
  return 0
}
### Install Docker GPG key from local file (offline-friendly).
### @param local_gpg_path string Local GPG file path
### @param keyring_path string Keyring output path
### @return 0 Success; 1 if local key missing; non-zero on other failures
### @require install, mkdir, chmod
install_key_from_local() {
  local src="${1:?local_gpg_path required}"
  local dst="${2:?keyring_path required}"
  # A missing local key is a soft failure so the caller decides the policy.
  if [ ! -f "$src" ]; then
    log WARN "Local Docker GPG key not found: ${src}"
    return 1
  fi
  run_root mkdir -p "$(dirname "$dst")"
  run_root install -m 0644 "$src" "$dst"
  run_root chmod a+r "$dst" || true
  log INFO "Docker GPG key installed from local: ${src} -> ${dst}"
  return 0
}
### Ensure Docker GPG key exists, using online if reachable; otherwise local-only.
### Offline policy: if local key missing -> DO NOT proceed (exit).
### @param is_online int 0 online; 1 offline
### @return 0 Success; exits with EC_OFFLINE_NO_KEY when offline and no local key
### @require none
ensure_docker_gpg_key() {
  local is_online="${1:?is_online required}"
  # > If keyring already exists, reuse it (idempotent)
  if [ -f "$DOCKER_KEYRING_PATH" ]; then
    log INFO "Docker keyring already exists: ${DOCKER_KEYRING_PATH}"
    run_root chmod a+r "$DOCKER_KEYRING_PATH" || true
    return 0
  fi
  # > Determine local key candidate paths (priority order)
  # BUGFIX: LOCAL_DOCKER_GPG has a non-empty default, so testing only -n made
  # the script-dir fallback unreachable; require the candidate file to exist.
  local script_dir local_candidate
  script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
  if [ -n "$LOCAL_DOCKER_GPG" ] && [ -f "$LOCAL_DOCKER_GPG" ]; then
    local_candidate="$LOCAL_DOCKER_GPG"
  elif [ -f "${script_dir}/docker.gpg" ]; then
    local_candidate="${script_dir}/docker.gpg"
  else
    local_candidate=""
  fi
  local gpg_url
  gpg_url="${DOCKER_APT_MIRROR%/}/gpg"
  if [ "$is_online" -eq 0 ]; then
    # Online: try online key fetch first; if fails, fallback to local if present.
    log DEBUG "Online mode: attempt online key install, fallback to local."
    if install_key_from_online "$gpg_url" "$DOCKER_KEYRING_PATH"; then
      return 0
    fi
    if [ -n "$local_candidate" ] && install_key_from_local "$local_candidate" "$DOCKER_KEYRING_PATH"; then
      return 0
    fi
    log ERROR "Failed to install Docker GPG key (online fetch failed and no usable local key)."
    exit "$EC_DEPENDENCY"
  fi
  # Offline: strictly local only; if missing -> do not proceed
  log INFO "Offline mode: install Docker GPG key from local only."
  if [ -n "$local_candidate" ] && install_key_from_local "$local_candidate" "$DOCKER_KEYRING_PATH"; then
    return 0
  fi
  log ERROR "Offline and local Docker GPG key is missing. Will NOT proceed (per policy)."
  exit "$EC_OFFLINE_NO_KEY"
}
# ==============================================================================
# Docker Repo Configuration
# ==============================================================================
### Configure Docker APT repository list file.
### @return 0 Success; exits on apt update failures
### @require dpkg, lsb_release, tee, apt-get
configure_docker_repo() {
  require_cmd dpkg || exit "$EC_DEPENDENCY"
  require_cmd lsb_release || exit "$EC_DEPENDENCY"
  require_cmd tee || exit "$EC_DEPENDENCY"
  local codename arch list_file repo_line
  codename="$(lsb_release -cs)"
  arch="$(dpkg --print-architecture)"
  list_file="/etc/apt/sources.list.d/docker.list"
  log INFO "Configuring Docker APT repo: ${DOCKER_APT_MIRROR} (${codename}, ${arch})"
  # > Only touch docker repo; do not touch system sources.list
  repo_line="deb [arch=${arch} signed-by=${DOCKER_KEYRING_PATH}] ${DOCKER_APT_MIRROR} ${codename} stable"
  printf '%s\n' "$repo_line" | run_root tee "$list_file" >/dev/null
  if ! run_root apt-get update; then
    log ERROR "apt-get update failed after configuring Docker repo."
    exit "$EC_APT_FAILURE"
  fi
  log INFO "Docker APT repo configured: ${list_file}"
}
# ==============================================================================
# Docker Installation
# ==============================================================================
### Resolve Docker package version string from APT cache.
### @param docker_version string Desired version ("20.10" or "20.10.15")
### @return 0 Success and echoes full apt version string; exits if not found
### @require apt-cache, awk, grep, sort, head
resolve_docker_version() {
  local docker_version="${1:?docker_version required}"
  local tool
  for tool in apt-cache awk grep sort head; do
    require_cmd "$tool" || exit "$EC_DEPENDENCY"
  done
  local resolved=""
  # > apt-cache madison output includes epoch, keep it for apt-get install
  if [[ "$docker_version" =~ ^[0-9]+\.[0-9]+$ ]]; then
    # Pick newest patch/build for that major.minor
    resolved="$(
      apt-cache madison docker-ce \
        | awk -F'|' '{gsub(/ /,"",$2); print $2}' \
        | grep -E "^[0-9]+:${docker_version}([.-]|\~)" \
        | sort -rV \
        | head -1 || true
    )"
  elif [[ "$docker_version" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    # Exact x.y.z: take the first matching madison row (newest first)
    resolved="$(
      apt-cache madison docker-ce \
        | awk -F'|' '{gsub(/ /,"",$2); print $2}' \
        | grep -E "^[0-9]+:${docker_version}.*" \
        | head -1 || true
    )"
  else
    log ERROR "Invalid DOCKER_VERSION format: ${docker_version} (expect 20.10 or 20.10.15)"
    exit "$EC_GENERAL"
  fi
  if [ -z "$resolved" ]; then
    log ERROR "Cannot find Docker version '${docker_version}' from APT. Check repo/mirror and apt proxy."
    exit "$EC_APT_FAILURE"
  fi
  echo "$resolved"
  return 0
}
### Install Docker packages via APT.
### @return 0 Success; exits on failure
### @require apt-get, systemctl
install_docker_packages() {
  require_cmd apt-get || exit "$EC_DEPENDENCY"
  local full_version
  full_version="$(resolve_docker_version "$DOCKER_VERSION")"
  log INFO "Installing Docker packages: docker-ce=${full_version}"
  # > Compose: use docker-compose-plugin (no curl downloading binaries)
  run_root apt-get install -y \
    "docker-ce=${full_version}" \
    "docker-ce-cli=${full_version}" \
    "docker-ce-rootless-extras=${full_version}" \
    containerd.io \
    docker-buildx-plugin \
    docker-compose-plugin || {
    log ERROR "Docker installation failed."
    exit "$EC_APT_FAILURE"
  }
  # > Optional: provide docker-compose legacy command compatibility
  if ! command -v docker-compose >/dev/null 2>&1 \
    && [ -x /usr/libexec/docker/cli-plugins/docker-compose ]; then
    run_root ln -sf /usr/libexec/docker/cli-plugins/docker-compose /usr/local/bin/docker-compose || true
  fi
  log INFO "Docker packages installed."
}
### Pin Docker packages to avoid unintended upgrades.
### @return 0 Success; non-zero on failures (non-fatal)
### @require apt-mark
pin_docker_packages() {
  # Missing apt-mark is only a warning; pinning is best-effort.
  if ! command -v apt-mark >/dev/null 2>&1; then
    log WARN "apt-mark not found; skip pinning."
    return 0
  fi
  log INFO "Holding Docker packages (prevent auto-upgrade)..."
  run_root apt-mark hold \
    docker-ce docker-ce-cli docker-ce-rootless-extras containerd.io \
    docker-buildx-plugin docker-compose-plugin >/dev/null 2>&1 || true
  return 0
}
### Enable and start Docker service, then verify versions.
### @return 0 Success; exits on failure to enable docker
### @require systemctl, docker
enable_docker_service() {
  require_cmd systemctl || exit "$EC_DEPENDENCY"
  log INFO "Enabling and starting docker service..."
  run_root systemctl enable --now docker
  # > Verification should not hard-fail the whole script
  local cmd_line
  for cmd_line in "docker --version" "docker compose version" "docker-compose --version"; do
    if command -v "${cmd_line%% *}" >/dev/null 2>&1; then
      ${cmd_line} || true
    fi
  done
  log INFO "Docker service enabled."
}
# ==============================================================================
# Main
# ==============================================================================
### Main entrypoint.
### @return 0 Success; non-zero on failure
### @require none
main() {
  init_traps
  log INFO "Starting Docker installer (v${SCRIPT_VERSION})..."
  check_platform
  ensure_prerequisites
  # Shell-style status codes: 0 = online, 1 = offline.
  local is_online
  if detect_public_network; then
    is_online=0
  else
    is_online=1
  fi
  ensure_docker_gpg_key "$is_online"
  configure_docker_repo
  install_docker_packages
  pin_docker_packages
  enable_docker_service
  log INFO "All tasks completed successfully."
  exit "$EC_OK"
}
main "$@"

View File

@@ -0,0 +1,24 @@
# Master node: install apt-cacher-ng so it can serve as a LAN APT cache/proxy.
sudo apt update
sudo apt install -y apt-cacher-ng
systemctl status apt-cacher-ng
# Worker node: route APT traffic through the master's apt-cacher-ng (port 3142).
sudo tee /etc/apt/apt.conf.d/01proxy <<EOF
Acquire::http::Proxy "http://10.22.57.8:3142";
Acquire::https::Proxy "http://10.22.57.8:3142";
EOF
# Remote variant: push the same proxy config to a worker over SSH.
# NOTE(review): assumes $server is exported by the caller — confirm before running.
ssh root@${server} "printf '%s\n' \
'Acquire::http::Proxy \"http://10.22.57.8:3142\";' \
'Acquire::https::Proxy \"http://10.22.57.8:3142\";' \
| tee /etc/apt/apt.conf.d/01proxy >/dev/null"

View File

@@ -0,0 +1,504 @@
#!/usr/bin/env bash
#==============================================================================
# APT Source Switcher - Ubuntu -> TUNA (Tsinghua University) Mirror
#
# Author: Smith Wang
# Version: 1.0.0
# License: MIT
#==============================================================================
# Module Dependencies:
# - bash (>= 5.0)
# - coreutils: cp, mv, mkdir, date, id, chmod, chown
# - util-linux / distro tools: (optional) lsb_release
# - text tools: sed, awk, grep
# - apt: apt-get
#
# Notes:
# - Ubuntu 24.04 typically uses Deb822 sources file: /etc/apt/sources.list.d/ubuntu.sources
# - Ubuntu 20.04/22.04 often uses traditional /etc/apt/sources.list
#==============================================================================
set -euo pipefail
IFS=$'\n\t'
umask 022
#------------------------------------------------------------------------------
# Global Constants
#------------------------------------------------------------------------------
readonly SCRIPT_NAME="$(basename "$0")"
readonly SCRIPT_VERSION="1.0.0"
readonly TUNA_UBUNTU_URI="https://mirrors.tuna.tsinghua.edu.cn/ubuntu/"
readonly DEFAULT_BACKUP_DIR="/etc/apt/backup"
readonly APT_SOURCES_LIST="/etc/apt/sources.list"
readonly APT_DEB822_SOURCES="/etc/apt/sources.list.d/ubuntu.sources"
# Log levels: DEBUG=0, INFO=1, WARN=2, ERROR=3
readonly LOG_LEVEL_DEBUG=0
readonly LOG_LEVEL_INFO=1
readonly LOG_LEVEL_WARN=2
readonly LOG_LEVEL_ERROR=3
#------------------------------------------------------------------------------
# Runtime Variables (defaults)
#------------------------------------------------------------------------------
log_level="$LOG_LEVEL_INFO"
backup_dir="$DEFAULT_BACKUP_DIR"
do_update="false"
assume_yes="false"
ubuntu_codename=""
ubuntu_version_id=""
sources_mode="" # "deb822" or "list"
#------------------------------------------------------------------------------
# Function Call Graph (ASCII)
#
# main
# |
# +--> parse_args
# +--> setup_traps
# +--> require_root
# +--> detect_ubuntu
# | |
# | +--> read_os_release
# |
# +--> choose_sources_mode
# +--> ensure_backup_dir
# +--> backup_sources
# +--> confirm_action
# +--> apply_tuna_sources
# | |
# | +--> write_sources_list_tuna
# | +--> patch_deb822_sources_tuna
# |
# +--> apt_update (optional)
# +--> summary
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Logging
#------------------------------------------------------------------------------
### Emit a timestamped log line to stderr, filtered by the global $log_level.
# @param level int Numeric log level (0=DEBUG,1=INFO,2=WARN,3=ERROR)
# @param message string Message to print
# @return 0 success
# @require date printf
log() {
  local lvl="$1"
  local msg="$2"
  # > Drop messages below the configured threshold.
  if [[ "$lvl" -lt "$log_level" ]]; then
    return 0
  fi
  local tag
  case "$lvl" in
    0) tag="DEBUG" ;;
    2) tag="WARN" ;;
    3) tag="ERROR" ;;
    *) tag="INFO" ;;  # level 1 and anything unknown map to INFO
  esac
  # > Human-readable timestamp keeps operator logs auditable.
  printf '%s [%s] %s: %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$tag" "$SCRIPT_NAME" "$msg" >&2
}
### Convenience wrappers around log() for each severity.
# @param $1 string Message to print
# @return 0
log_debug() { log "$LOG_LEVEL_DEBUG" "$1"; }
log_info() { log "$LOG_LEVEL_INFO" "$1"; }
log_warn() { log "$LOG_LEVEL_WARN" "$1"; }
log_error() { log "$LOG_LEVEL_ERROR" "$1"; }
#------------------------------------------------------------------------------
# Error handling / traps
#------------------------------------------------------------------------------
### Trap handler for unexpected errors.
# @param exit_code int Exit code
# @param line_no int Line number where error occurred
# @param cmd string The command that failed
# @return 0
# @require printf
on_err() {
local exit_code="$1"
local line_no="$2"
local cmd="$3"
log_error "Script failed (exit=${exit_code}) at line ${line_no}: ${cmd}"
}
### Cleanup handler (reserved for future extension).
# > No-op today; hook point for temp-file cleanup later.
# @return 0
# @require true
on_exit() {
true
}
### Setup traps for ERR and EXIT.
# @return 0
# @require trap
setup_traps() {
# > Preserve error context with BASH_LINENO and BASH_COMMAND
# > Single quotes defer expansion of $?, LINENO and BASH_COMMAND to trap time.
trap 'on_err "$?" "${LINENO}" "${BASH_COMMAND}"' ERR
trap 'on_exit' EXIT
}
#------------------------------------------------------------------------------
# Utility / validation
#------------------------------------------------------------------------------
### Print CLI usage/help text to stdout.
# @return 0
# @require cat
usage() {
# > Quoted 'EOF' delimiter: no variable expansion inside the help text.
cat <<'EOF'
Usage:
sudo ./apt_tuna_switch.sh [options]
Options:
-y, --yes Non-interactive; do not prompt.
-u, --update Run "apt-get update" after switching.
-b, --backup-dir Backup directory (default: /etc/apt/backup)
-d, --debug Enable DEBUG logs.
-h, --help Show help.
Examples:
sudo ./apt_tuna_switch.sh -y -u
sudo ./apt_tuna_switch.sh --backup-dir /root/apt-bak --update
EOF
}
### Parse CLI arguments into the global runtime flags.
# @param args string[] CLI args
# @return 0 success (exits 0 on --help, 2 on bad input)
# @require printf
parse_args() {
  while (( $# > 0 )); do
    case "$1" in
      -y|--yes)
        assume_yes="true"
        shift
        ;;
      -u|--update)
        do_update="true"
        shift
        ;;
      -b|--backup-dir)
        # > --backup-dir consumes a value; reject a dangling flag.
        if (( $# < 2 )); then
          log_error "Missing value for --backup-dir"
          usage
          exit 2
        fi
        backup_dir="$2"
        shift 2
        ;;
      -d|--debug)
        log_level="$LOG_LEVEL_DEBUG"
        shift
        ;;
      -h|--help)
        usage
        exit 0
        ;;
      *)
        log_error "Unknown argument: $1"
        usage
        exit 2
        ;;
    esac
  done
}
### Abort unless the effective user is root.
# @return 0 if root; exit 1 otherwise
# @require id
require_root() {
  local uid
  uid="$(id -u)"
  if (( uid != 0 )); then
    log_error "This script must be run as root. Try: sudo ./${SCRIPT_NAME}"
    exit 1
  fi
}
### Read /etc/os-release fields into the ubuntu_* globals.
# Sets: ubuntu_version_id, ubuntu_codename (codename may stay empty here).
# @return 0
# @require awk grep
read_os_release() {
if [[ ! -r /etc/os-release ]]; then
log_error "Cannot read /etc/os-release"
exit 1
fi
# > Parse key fields safely
# > awk strips surrounding quotes; head -n1 guards against duplicate keys.
local os_id
os_id="$(awk -F= '$1=="ID"{gsub(/"/,"",$2); print $2}' /etc/os-release | head -n1 || true)"
ubuntu_version_id="$(awk -F= '$1=="VERSION_ID"{gsub(/"/,"",$2); print $2}' /etc/os-release | head -n1 || true)"
ubuntu_codename="$(awk -F= '$1=="VERSION_CODENAME"{gsub(/"/,"",$2); print $2}' /etc/os-release | head -n1 || true)"
if [[ "$os_id" != "ubuntu" ]]; then
log_error "Unsupported OS ID: ${os_id:-unknown}. This script supports Ubuntu only."
exit 1
fi
if [[ -z "$ubuntu_version_id" ]]; then
log_error "Failed to detect Ubuntu VERSION_ID from /etc/os-release"
exit 1
fi
# > For some environments, VERSION_CODENAME may be empty; try UBUNTU_CODENAME
if [[ -z "$ubuntu_codename" ]]; then
ubuntu_codename="$(awk -F= '$1=="UBUNTU_CODENAME"{gsub(/"/,"",$2); print $2}' /etc/os-release | head -n1 || true)"
fi
}
### Detect a supported Ubuntu release and resolve its codename.
# @return 0 success; exit otherwise
# @require awk
detect_ubuntu() {
  read_os_release
  # > Map each supported release to a fallback codename in case
  # > /etc/os-release carried no VERSION_CODENAME/UBUNTU_CODENAME.
  if [[ "$ubuntu_version_id" == "20.04" ]]; then
    ubuntu_codename="${ubuntu_codename:-focal}"
  elif [[ "$ubuntu_version_id" == "22.04" ]]; then
    ubuntu_codename="${ubuntu_codename:-jammy}"
  elif [[ "$ubuntu_version_id" == "24.04" ]]; then
    ubuntu_codename="${ubuntu_codename:-noble}"
  else
    log_error "Unsupported Ubuntu version: ${ubuntu_version_id}. Supported: 20.04/22.04/24.04"
    exit 1
  fi
  if [[ -z "$ubuntu_codename" ]]; then
    log_error "Failed to determine Ubuntu codename."
    exit 1
  fi
  log_info "Detected Ubuntu ${ubuntu_version_id} (${ubuntu_codename})"
}
### Decide which APT sources format this host uses.
# @return 0
# @require test
choose_sources_mode() {
  # > Deb822 (Ubuntu 24.04 style) wins when present; otherwise manage the
  # > classic sources.list (created later if it does not exist yet).
  sources_mode="list"
  if [[ -f "$APT_DEB822_SOURCES" ]]; then
    sources_mode="deb822"
  fi
  log_info "Sources mode: ${sources_mode}"
}
### Create the backup directory when missing.
# @param backup_dir string Directory path
# @return 0
# @require mkdir
ensure_backup_dir() {
  local target="$1"
  # > An empty path would make mkdir -p a silent no-op; refuse it.
  [[ -n "$target" ]] || { log_error "Backup directory is empty."; exit 1; }
  mkdir -p "$target"
  log_debug "Backup directory ensured: $target"
}
### Copy an APT sources file into the backup directory with a timestamp suffix.
# @param src_path string File path to backup
# @param backup_dir string Backup directory
# @return 0 (missing source is logged and skipped, not an error)
# @require cp date
backup_file_if_exists() {
  local source_file="$1"
  local target_dir="$2"
  if [[ ! -e "$source_file" ]]; then
    log_warn "Skip backup (not found): $source_file"
    return 0
  fi
  local stamp base_name destination
  stamp="$(date '+%Y%m%d-%H%M%S')"
  base_name="$(basename "$source_file")"
  destination="${target_dir}/${base_name}.${stamp}.bak"
  # > -a preserves mode/ownership/timestamps of the original.
  cp -a "$source_file" "$destination"
  log_info "Backed up: $source_file -> $destination"
}
### Backup relevant source files.
# > Both formats are attempted; backup_file_if_exists skips missing ones.
# @return 0
# @require cp
backup_sources() {
backup_file_if_exists "$APT_SOURCES_LIST" "$backup_dir"
backup_file_if_exists "$APT_DEB822_SOURCES" "$backup_dir"
}
### Prompt the operator before modifying APT config (skipped with --yes).
# @return 0 if confirmed; exit 0 on cancel
# @require read
confirm_action() {
  if [[ "$assume_yes" == "true" ]]; then
    log_info "Non-interactive mode: --yes"
    return 0
  fi
  log_warn "About to replace APT sources with TUNA mirror:"
  log_warn " ${TUNA_UBUNTU_URI}"
  log_warn "This will modify system APT source configuration."
  printf "Continue? [y/N]: " >&2
  local answer=""
  read -r answer
  # > Anything but an explicit yes is treated as a cancel.
  case "$answer" in
    y|Y|yes|YES) return 0 ;;
  esac
  log_info "Cancelled by user."
  exit 0
}
#------------------------------------------------------------------------------
# Core actions
#------------------------------------------------------------------------------
### Write a fresh /etc/apt/sources.list targeting the TUNA mirror.
# @param codename string Ubuntu codename (focal/jammy/noble)
# @return 0
# @require cat chmod chown mv
write_sources_list_tuna() {
  local suite="$1"
  local staged
  staged="$(mktemp)"
  # > Standard suites: release, updates, backports, security.
  cat >"$staged" <<EOF
#------------------------------------------------------------------------------#
# Ubuntu ${suite} - TUNA Mirror
# Generated by: ${SCRIPT_NAME} v${SCRIPT_VERSION}
# Mirror: ${TUNA_UBUNTU_URI}
#------------------------------------------------------------------------------#
deb ${TUNA_UBUNTU_URI} ${suite} main restricted universe multiverse
deb ${TUNA_UBUNTU_URI} ${suite}-updates main restricted universe multiverse
deb ${TUNA_UBUNTU_URI} ${suite}-backports main restricted universe multiverse
deb ${TUNA_UBUNTU_URI} ${suite}-security main restricted universe multiverse
# If you want source packages, uncomment the following lines:
# deb-src ${TUNA_UBUNTU_URI} ${suite} main restricted universe multiverse
# deb-src ${TUNA_UBUNTU_URI} ${suite}-updates main restricted universe multiverse
# deb-src ${TUNA_UBUNTU_URI} ${suite}-backports main restricted universe multiverse
# deb-src ${TUNA_UBUNTU_URI} ${suite}-security main restricted universe multiverse
EOF
  chmod 0644 "$staged"
  chown root:root "$staged"
  # > Atomic replace: apt never observes a partially written file.
  mkdir -p "$(dirname "$APT_SOURCES_LIST")"
  mv -f "$staged" "$APT_SOURCES_LIST"
  log_info "Updated: $APT_SOURCES_LIST"
}
### Point every Deb822 "URIs:" line of ubuntu.sources at the TUNA mirror.
# @param deb822_file string Path to ubuntu.sources
# @param tuna_uri string The TUNA mirror base URI
# @return 0 (missing file is logged and skipped)
# @require sed cp mktemp chmod chown mv grep
patch_deb822_sources_tuna() {
  local target="$1"
  local mirror="$2"
  if [[ ! -f "$target" ]]; then
    log_warn "Deb822 sources file not found: $target"
    return 0
  fi
  local staged
  staged="$(mktemp)"
  cp -a "$target" "$staged"
  # > Rewrite every stanza's URIs: field; all other Deb822 fields stay intact.
  sed -i -E "s|^URIs:[[:space:]]+.*$|URIs: ${mirror}|g" "$staged"
  # > Sanity check: refuse to install a file that lost its URIs: line.
  grep -qE '^URIs:[[:space:]]+' "$staged" || {
    log_error "Deb822 patch failed: no 'URIs:' line found after edit."
    exit 1
  }
  chmod 0644 "$staged"
  chown root:root "$staged"
  mv -f "$staged" "$target"
  log_info "Patched Deb822 sources: $target"
}
### Dispatch to the writer matching the detected sources format.
# @return 0
# @require true
apply_tuna_sources() {
  if [[ "$sources_mode" == "deb822" ]]; then
    patch_deb822_sources_tuna "$APT_DEB822_SOURCES" "$TUNA_UBUNTU_URI"
  elif [[ "$sources_mode" == "list" ]]; then
    write_sources_list_tuna "$ubuntu_codename"
  else
    # > Defensive: choose_sources_mode only ever sets the two values above.
    log_error "Unknown sources mode: $sources_mode"
    exit 1
  fi
}
### Run apt-get update, but only when --update was requested.
# @return 0
# @require apt-get
apt_update() {
  [[ "$do_update" == "true" ]] || {
    log_info "Skip apt-get update (use --update to enable)."
    return 0
  }
  log_info "Running: apt-get update"
  # > Non-interactive frontend avoids prompts in automated runs.
  DEBIAN_FRONTEND=noninteractive apt-get update
  log_info "apt-get update completed."
}
### Print summary.
# @return 0
summary() {
log_info "Done."
log_info "Backup directory: ${backup_dir}"
log_info "Mirror applied: ${TUNA_UBUNTU_URI}"
log_info "Ubuntu: ${ubuntu_version_id} (${ubuntu_codename}), mode: ${sources_mode}"
}
#------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------
### Main entry: validate environment, back up, confirm, then switch sources.
### Backups happen before the confirmation so a user cancel is harmless.
# @param args string[] CLI args
# @return 0 success; non-zero otherwise
# @require bash
main() {
parse_args "$@"
setup_traps
require_root
detect_ubuntu
choose_sources_mode
ensure_backup_dir "$backup_dir"
backup_sources
confirm_action
apply_tuna_sources
apt_update
summary
}
main "$@"

View File

@@ -0,0 +1,767 @@
#!/bin/bash
###############################################################################
# NGINX Installation Script for China Mainland with Mirror Acceleration
###############################################################################
# @author Advanced Bash Shell Engineer
# @version 1.0.0
# @license MIT
# @created 2026-01-19
# @desc Production-grade NGINX installation script with China mirror support
# Supports Ubuntu 18.04/20.04/22.04/24.04 with version pinning
###############################################################################
###############################################################################
# GLOBAL CONSTANTS
###############################################################################
readonly SCRIPT_NAME="$(basename "${BASH_SOURCE[0]}")"
readonly SCRIPT_VERSION="1.0.0"
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Color codes for output
readonly COLOR_RED="\033[0;31m"
readonly COLOR_GREEN="\033[0;32m"
readonly COLOR_YELLOW="\033[1;33m"
readonly COLOR_BLUE="\033[0;34m"
readonly COLOR_RESET="\033[0m"
# Log levels
readonly LOG_LEVEL_DEBUG=0
readonly LOG_LEVEL_INFO=1
readonly LOG_LEVEL_WARN=2
readonly LOG_LEVEL_ERROR=3
# Default configuration
readonly DEFAULT_NGINX_VERSION="stable"
readonly DEFAULT_MIRROR="ustc"
readonly SUPPORTED_UBUNTU_VERSIONS=("18.04" "20.04" "22.04" "24.04")
# Mirror configurations (China mainland accelerated sources)
declare -A MIRROR_URLS=(
["aliyun"]="http://mirrors.aliyun.com/nginx"
["tsinghua"]="https://mirrors.tuna.tsinghua.edu.cn/nginx"
["ustc"]="https://mirrors.ustc.edu.cn/nginx/ubuntu"
["official"]="http://nginx.org"
)
declare -A MIRROR_KEY_URLS=(
["aliyun"]="http://mirrors.aliyun.com/nginx/keys/nginx_signing.key"
["tsinghua"]="https://mirrors.tuna.tsinghua.edu.cn/nginx/keys/nginx_signing.key"
["ustc"]="https://mirrors.ustc.edu.cn/nginx/keys/nginx_signing.key"
["official"]="https://nginx.org/keys/nginx_signing.key"
)
# Global variables
CURRENT_LOG_LEVEL="${LOG_LEVEL_INFO}"
NGINX_VERSION="${DEFAULT_NGINX_VERSION}"
MIRROR_SOURCE="${DEFAULT_MIRROR}"
FORCE_REINSTALL=false
DRY_RUN=false
###############################################################################
# ERROR HANDLING & TRAPS
###############################################################################
# > Strict mode: abort on errors, unset variables, and pipeline failures.
set -euo pipefail
IFS=$'\n\t'
###
### Cleanup function for graceful exit; preserves the original exit code.
### @param none
### @return the exit code the script was terminating with
### @require none
###
cleanup() {
local exit_code=$?
if [[ ${exit_code} -ne 0 ]]; then
log_error "脚本退出,错误码: ${exit_code}"
fi
# > Perform cleanup operations if needed
return "${exit_code}"
}
trap cleanup EXIT
trap 'log_error "用户中断脚本执行"; exit 130' INT TERM
###############################################################################
# LOGGING FUNCTIONS
###############################################################################
###
### Core logging primitive: prints to stderr when the level passes the filter.
### @param log_level integer Log level (0-3)
### @param message string Message to log
### @param color string ANSI color code for output
### @param level_name string Localized level label
### @return void
### @require none
###
_log() {
  local lvl="$1" msg="$2" tint="$3" label="$4"
  # > Suppress anything below the active threshold.
  [[ "$lvl" -ge "$CURRENT_LOG_LEVEL" ]] || return 0
  local now
  now="$(date '+%Y-%m-%d %H:%M:%S')"
  echo -e "${tint}[${now}] [${label}] ${msg}${COLOR_RESET}" >&2
}
###
### Debug level logging (blue, label "调试")
### @param message string Debug message
### @return void
### @require none
###
log_debug() {
_log "${LOG_LEVEL_DEBUG}" "$1" "${COLOR_BLUE}" "调试"
}
###
### Info level logging (green, label "信息")
### @param message string Info message
### @return void
### @require none
###
log_info() {
_log "${LOG_LEVEL_INFO}" "$1" "${COLOR_GREEN}" "信息"
}
###
### Warning level logging (yellow, label "警告")
### @param message string Warning message
### @return void
### @require none
###
log_warn() {
_log "${LOG_LEVEL_WARN}" "$1" "${COLOR_YELLOW}" "警告"
}
###
### Error level logging (red, label "错误")
### @param message string Error message
### @return void
### @require none
###
log_error() {
_log "${LOG_LEVEL_ERROR}" "$1" "${COLOR_RED}" "错误"
}
###############################################################################
# VALIDATION FUNCTIONS
###############################################################################
###
### Check if script is running with root privileges
### @param none
### @return 0 if root, 1 otherwise
### @require none
###
check_root_privileges() {
  # > Non-root cannot write APT configs or manage services; bail out early.
  if (( EUID != 0 )); then
    log_error "此脚本必须以 root 身份运行,或使用 sudo 执行"
    return 1
  fi
  log_debug "已确认具备 root 权限"
  return 0
}
###
### Validate Ubuntu version compatibility
### @param none
### @return 0 if supported, 1 otherwise
### @require lsb_release command
###
validate_ubuntu_version() {
  # > lsb_release is the source of truth for the release number.
  if ! command -v lsb_release &> /dev/null; then
    log_error "未找到 lsb_release 命令,无法识别 Ubuntu 版本。"
    return 1
  fi
  local detected
  detected="$(lsb_release -rs)"
  log_debug "检测到 Ubuntu 版本: ${detected}"
  # > Early-return as soon as the detected release matches a supported one.
  local candidate
  for candidate in "${SUPPORTED_UBUNTU_VERSIONS[@]}"; do
    if [[ "${detected}" == "${candidate}" ]]; then
      log_info "Ubuntu ${detected} 受支持"
      return 0
    fi
  done
  log_error "Ubuntu ${detected} 不受支持。支持的版本: ${SUPPORTED_UBUNTU_VERSIONS[*]}"
  return 1
}
###
### Validate mirror source selection against the MIRROR_URLS table
### @param mirror_name string Name of the mirror to validate
### @return 0 if valid, 1 otherwise
### @require none
###
validate_mirror_source() {
  local candidate=$1
  # > -v tests key existence in the associative MIRROR_URLS array.
  if [[ -v MIRROR_URLS["${candidate}"] ]]; then
    log_debug "镜像源 '${candidate}' 有效"
    return 0
  fi
  log_error "无效的镜像源: ${candidate}"
  log_info "可用镜像源: ${!MIRROR_URLS[*]}"
  return 1
}
###
### Check network connectivity to mirror
### @param mirror_url string URL to test
### @return 0 if reachable, 1 otherwise
### @require curl
###
check_mirror_connectivity() {
local mirror_url=$1
local timeout=10
log_debug "正在测试镜像连通性: ${mirror_url}"
# > -sSf: quiet, show errors, fail on HTTP >= 400; hard 10s cap on both
# > connect and total transfer time so a dead mirror cannot stall the run.
if curl -sSf --connect-timeout "${timeout}" --max-time "${timeout}" \
"${mirror_url}" -o /dev/null 2>/dev/null; then
log_debug "镜像 ${mirror_url} 可访问"
return 0
else
log_warn "镜像 ${mirror_url} 不可访问"
return 1
fi
}
###############################################################################
# SYSTEM PREPARATION FUNCTIONS
###############################################################################
###
### Install required system dependencies
### @param none
### @return 0 on success, 1 on failure
### @require apt-get
###
install_dependencies() {
log_info "正在安装系统依赖..."
local dependencies=(
"curl"
"gnupg2"
"ca-certificates"
"lsb-release"
"ubuntu-keyring"
"apt-transport-https"
)
# > DRY_RUN short-circuits before any system mutation.
if [[ "${DRY_RUN}" == true ]]; then
log_info "[演练模式] 将会安装: ${dependencies[*]}"
return 0
fi
# > Update package index first
if ! apt-get update -qq; then
log_error "更新软件包索引失败"
return 1
fi
# > Install dependencies
if ! apt-get install -y -qq "${dependencies[@]}"; then
log_error "安装依赖失败"
return 1
fi
log_info "依赖安装完成"
return 0
}
###
### Remove existing NGINX installation if present
### @param none
### @return 0 on success or if not installed; 1 when installed and --force not given
### @require apt-get, dpkg
###
remove_existing_nginx() {
log_info "正在检查是否已安装 NGINX..."
# > "^ii" matches only packages in the installed state.
if ! dpkg -l | grep -q "^ii.*nginx"; then
log_info "未发现已安装的 NGINX"
return 0
fi
if [[ "${FORCE_REINSTALL}" == false ]]; then
log_warn "NGINX 已安装。如需重装请使用 --force。"
return 1
fi
log_info "正在卸载已安装的 NGINX..."
if [[ "${DRY_RUN}" == true ]]; then
log_info "[演练模式] 将会卸载已安装的 NGINX"
return 0
fi
# > Stop NGINX service if running
if systemctl is-active --quiet nginx 2>/dev/null; then
systemctl stop nginx || true
fi
# > Remove NGINX packages (best-effort; partial failure is tolerated)
if ! apt-get remove --purge -y nginx nginx-common nginx-full 2>/dev/null; then
log_warn "部分 NGINX 软件包可能未能完全卸载(可忽略)"
fi
# > Clean up configuration files
apt-get autoremove -y -qq || true
log_info "已卸载现有 NGINX"
return 0
}
###############################################################################
# NGINX INSTALLATION FUNCTIONS
###############################################################################
###
### Import NGINX GPG signing key
### @param mirror_name string Name of the mirror whose key URL to use
### @return 0 on success, 1 on failure
### @require curl, gpg
###
import_nginx_gpg_key() {
local mirror_name=$1
local key_url="${MIRROR_KEY_URLS[${mirror_name}]}"
local keyring_path="/usr/share/keyrings/nginx-archive-keyring.gpg"
log_info "正在导入 NGINX GPG 签名密钥(来源:${mirror_name}..."
if [[ "${DRY_RUN}" == true ]]; then
log_info "[演练模式] 将会从以下地址导入 GPG 密钥: ${key_url}"
return 0
fi
# > Remove old keyring if exists
[[ -f "${keyring_path}" ]] && rm -f "${keyring_path}"
# > Download and import GPG key (dearmor converts ASCII key to binary keyring)
if ! curl -fsSL "${key_url}" | gpg --dearmor -o "${keyring_path}" 2>/dev/null; then
log_error "导入 GPG 密钥失败: ${key_url}"
return 1
fi
# > Verify the key was imported correctly (dry-run import, no side effects)
if ! gpg --dry-run --quiet --no-keyring --import --import-options import-show \
"${keyring_path}" &>/dev/null; then
log_error "GPG 密钥校验失败"
return 1
fi
# > Set proper permissions so apt can read the keyring
chmod 644 "${keyring_path}"
log_info "GPG 密钥导入并校验成功"
return 0
}
###
### Configure NGINX APT repository
### @param mirror_name string Name of the mirror providing the repo
### @return 0 on success, 1 on failure
### @require lsb_release
###
configure_nginx_repository() {
local mirror_name=$1
local mirror_url="${MIRROR_URLS[${mirror_name}]}"
local codename
codename="$(lsb_release -cs)"
local repo_file="/etc/apt/sources.list.d/nginx.list"
local keyring_path="/usr/share/keyrings/nginx-archive-keyring.gpg"
# > Mirrors lay out the repo differently:
# > - official / most mirrors: .../packages/ubuntu
# > - USTC: .../ubuntu
local repo_base
case "${mirror_name}" in
ustc)
repo_base="${mirror_url}"
;;
*)
repo_base="${mirror_url}/packages/ubuntu"
;;
esac
log_info "正在配置 NGINX 软件源Ubuntu ${codename}..."
if [[ "${DRY_RUN}" == true ]]; then
log_info "[演练模式] 将会配置软件源deb [signed-by=${keyring_path}] ${repo_base} ${codename} nginx"
return 0
fi
# > Create repository configuration (signed-by pins the dedicated keyring)
local repo_config="deb [signed-by=${keyring_path}] ${repo_base} ${codename} nginx"
echo "${repo_config}" | tee "${repo_file}" > /dev/null
log_debug "已生成软件源配置文件:${repo_file}"
log_debug "软件源地址:${repo_base} ${codename}"
log_info "NGINX 软件源配置完成"
return 0
}
###
### Configure APT pinning preferences for NGINX
### Writes /etc/apt/preferences.d/99nginx so packages from the nginx repo
### (official or any mirror) outrank Ubuntu's own nginx packages.
### @param none
### @return 0 on success
### @require none
###
configure_apt_pinning() {
  local pref_file="/etc/apt/preferences.d/99nginx"
  log_info "正在配置 APT Pin 优先级..."
  if [[ "${DRY_RUN}" == true ]]; then
    log_info "[演练模式] 将会配置 APT Pin 优先级"
    return 0
  fi
  # > BUGFIX: an apt_preferences record may contain only ONE Pin line; the
  # > previous stanza carried both "Pin: origin nginx.org" and
  # > "Pin: release o=nginx", and the host-based origin pin would never
  # > match mirror hosts (mirrors.ustc.edu.cn, ...) anyway. Pin on the
  # > repository Release file's "Origin: nginx" field, which mirrors preserve.
  cat > "${pref_file}" <<EOF
Package: *
Pin: release o=nginx
Pin-Priority: 900
EOF
  log_debug "APT Pin 配置写入:${pref_file}"
  log_info "APT Pin 优先级配置完成"
  return 0
}
###
### Install NGINX package
### @param version string NGINX version to install (stable/mainline/specific)
### @return 0 on success, 1 on failure
### @require apt-get
###
install_nginx_package() {
local version=$1
local package_spec="nginx"
log_info "正在安装 NGINX ${version}..."
# > Update package index with new repository
if [[ "${DRY_RUN}" == false ]]; then
if ! apt-get update -qq; then
log_error "更新软件包索引失败"
return 1
fi
fi
# > Handle version specification
if [[ "${version}" != "stable" && "${version}" != "mainline" ]]; then
# > Specific version requested, e.g. nginx=1.24.0 (apt version-pin syntax)
package_spec="nginx=${version}"
log_debug "安装指定版本:${package_spec}"
else
log_debug "从软件源安装:${version}"
fi
if [[ "${DRY_RUN}" == true ]]; then
log_info "[演练模式] 将会安装软件包:${package_spec}"
return 0
fi
# > Install NGINX (noninteractive frontend suppresses dpkg prompts)
if ! DEBIAN_FRONTEND=noninteractive apt-get install -y -qq "${package_spec}"; then
log_error "安装 NGINX 失败"
return 1
fi
log_info "NGINX 安装完成"
return 0
}
###
### Verify NGINX installation
### @param none
### @return 0 on success, 1 on failure
### @require nginx
###
verify_nginx_installation() {
log_info "正在验证 NGINX 安装结果..."
# > Check if nginx binary exists
if ! command -v nginx &> /dev/null; then
log_error "未在 PATH 中找到 nginx 可执行文件"
return 1
fi
# > Get and display version (nginx -v writes to stderr, hence 2>&1)
local nginx_version_output
nginx_version_output="$(nginx -v 2>&1)"
log_info "已安装: ${nginx_version_output}"
# > Test configuration
if ! nginx -t &>/dev/null; then
log_error "NGINX 配置文件校验失败"
return 1
fi
log_info "NGINX 安装验证通过"
return 0
}
###
### Enable and start NGINX service
### @param none
### @return 0 on success, 1 on failure
### @require systemctl
###
enable_nginx_service() {
log_info "正在设置 NGINX 开机自启并启动服务..."
if [[ "${DRY_RUN}" == true ]]; then
log_info "[演练模式] 将会启用并启动 NGINX 服务"
return 0
fi
# > Enable service to start on boot
if ! systemctl enable nginx &>/dev/null; then
log_error "设置 NGINX 开机自启失败"
return 1
fi
# > Start the service
if ! systemctl start nginx; then
log_error "启动 NGINX 服务失败"
return 1
fi
# > Verify service is running (start can succeed while the unit dies later)
if ! systemctl is-active --quiet nginx; then
log_error "NGINX 服务未处于运行状态"
return 1
fi
log_info "NGINX 服务已启用并启动"
return 0
}
###############################################################################
# MAIN ORCHESTRATION
###############################################################################
###
### Display usage information
### @param none
### @return void
### @require none
###
show_usage() {
# > Unquoted EOF delimiter: ${SCRIPT_NAME} etc. expand inside the help text.
cat <<EOF
Usage: ${SCRIPT_NAME} [选项]
NGINX 安装脚本(面向中国大陆镜像加速) v${SCRIPT_VERSION}
选项:
-v, --version VERSION 指定要安装的 NGINX 版本
stable/mainline/1.24.0/...
默认:${DEFAULT_NGINX_VERSION}
-m, --mirror MIRROR 选择镜像源
aliyun/tsinghua/ustc/official
默认:${DEFAULT_MIRROR}
-f, --force 若已安装则强制重装
-d, --dry-run 演练模式:仅展示将执行的操作,不真正执行
--debug 开启调试日志
-h, --help 显示帮助信息
示例:
# 使用默认镜像USTC安装稳定版
sudo ${SCRIPT_NAME}
# 使用清华镜像安装指定版本
sudo ${SCRIPT_NAME} --version 1.24.0 --mirror tsinghua
# 强制重装并开启调试
sudo ${SCRIPT_NAME} --force --debug
# 演练模式预览
sudo ${SCRIPT_NAME} --dry-run
支持的 Ubuntu 版本:
${SUPPORTED_UBUNTU_VERSIONS[*]}
可用镜像源:
${!MIRROR_URLS[*]}
EOF
}
###
### Parse command line arguments into the global flags
### @param args array Command line arguments
### @return 0 on success; exits 1 on invalid arguments, 0 on --help
### @require none
###
parse_arguments() {
  while [[ $# -gt 0 ]]; do
    case $1 in
      -v|--version)
        # > BUGFIX: guard against a missing value — under `set -u` an
        # > unguarded "$2" aborts with an unhelpful unbound-variable error.
        if [[ $# -lt 2 ]]; then
          log_error "选项 $1 需要一个参数值"
          show_usage
          exit 1
        fi
        NGINX_VERSION="$2"
        shift 2
        ;;
      -m|--mirror)
        if [[ $# -lt 2 ]]; then
          log_error "选项 $1 需要一个参数值"
          show_usage
          exit 1
        fi
        MIRROR_SOURCE="$2"
        shift 2
        ;;
      -f|--force)
        FORCE_REINSTALL=true
        shift
        ;;
      -d|--dry-run)
        DRY_RUN=true
        shift
        ;;
      --debug)
        CURRENT_LOG_LEVEL="${LOG_LEVEL_DEBUG}"
        shift
        ;;
      -h|--help)
        show_usage
        exit 0
        ;;
      *)
        log_error "未知参数: $1"
        show_usage
        exit 1
        ;;
    esac
  done
  return 0
}
###
### Main installation workflow (8 ordered steps; each aborts on failure)
### @param none
### @return 0 on success, 1 on failure
### @require all functions above
###
main() {
log_info "========================================="
log_info "NGINX 安装脚本 v${SCRIPT_VERSION}"
log_info "========================================="
# > Step 1: Pre-flight checks
log_info "步骤 1/8执行预检查..."
check_root_privileges || return 1
validate_ubuntu_version || return 1
validate_mirror_source "${MIRROR_SOURCE}" || return 1
# > Step 2: Check mirror connectivity
log_info "步骤 2/8检查镜像连通性..."
if ! check_mirror_connectivity "${MIRROR_URLS[${MIRROR_SOURCE}]}"; then
log_warn "主镜像不可用,尝试回退方案..."
# > Fallback to official if mirror fails (mutates global MIRROR_SOURCE)
if [[ "${MIRROR_SOURCE}" != "official" ]]; then
MIRROR_SOURCE="official"
log_info "已回退到官方源"
fi
fi
# > Step 3: Install dependencies
log_info "步骤 3/8安装依赖..."
install_dependencies || return 1
# > Step 4: Handle existing installation
log_info "步骤 4/8检查已安装版本..."
remove_existing_nginx || return 1
# > Step 5: Import GPG key
log_info "步骤 5/8导入 NGINX GPG 密钥..."
import_nginx_gpg_key "${MIRROR_SOURCE}" || return 1
# > Step 6: Configure repository
log_info "步骤 6/8配置 NGINX 软件源..."
configure_nginx_repository "${MIRROR_SOURCE}" || return 1
configure_apt_pinning || return 1
# > Step 7: Install NGINX
log_info "步骤 7/8安装 NGINX..."
install_nginx_package "${NGINX_VERSION}" || return 1
verify_nginx_installation || return 1
# > Step 8: Enable service
log_info "步骤 8/8启用 NGINX 服务..."
enable_nginx_service || return 1
log_info "========================================="
log_info "✓ NGINX 安装完成!"
log_info "========================================="
if [[ "${DRY_RUN}" == false ]]; then
log_info "服务状态: $(systemctl is-active nginx)"
log_info "NGINX 版本: $(nginx -v 2>&1 | cut -d'/' -f2)"
log_info ""
log_info "常用命令:"
log_info " 启动: sudo systemctl start nginx"
log_info " 停止: sudo systemctl stop nginx"
log_info " 重启: sudo systemctl restart nginx"
log_info " 状态: sudo systemctl status nginx"
log_info " 校验配置: sudo nginx -t"
fi
return 0
}
###############################################################################
# SCRIPT ENTRY POINT
###############################################################################
# ASCII Flow Diagram - Function Call Hierarchy
# ┌─────────────────────────────────────────────────────────────┐
# │ MAIN() │
# └──────────────────┬──────────────────────────────────────────┘
# │
# ┌─────────────┼─────────────┬──────────────┬─────────────┐
# │ │ │ │ │
# ▼ ▼ ▼ ▼ ▼
# ┌─────────┐ ┌──────────┐ ┌─────────┐ ┌────────────┐ ┌─────────┐
# │Pre-flight│ │Install │ │Import │ │Configure │ │Install │
# │Checks │ │Deps │ │GPG Key │ │Repository │ │NGINX │
# └─────────┘ └──────────┘ └─────────┘ └────────────┘ └─────────┘
# │ │ │ │ │
# ├─check_root_privileges │ │ │
# ├─validate_ubuntu_version │ │ │
# └─validate_mirror_source │ │ │
# │ │ │ │
# └─install_dependencies │ │
# │ │ │
# └─import_nginx_gpg_key │
# │ │
# ├─configure_nginx_repository
# └─configure_apt_pinning
# │
# ├─install_nginx_package
# └─verify_nginx_installation
# Parse command line arguments
parse_arguments "$@"
# Execute main workflow; propagate its exit status as the script's exit code
main
exit $?

View File

@@ -1,9 +1,8 @@
upstream proxy_server {
ip_hash;
server 192.168.0.2:30500;
server 192.168.0.4:30500;
server 192.168.0.5:30500;
server 192.168.0.6:30500;
server 192.168.1.4:30500;
server 192.168.1.3:30500;
server 192.168.1.5:30500;
}
server {
@@ -22,7 +21,7 @@ server {
proxy_buffering off;
proxy_buffer_size 4k;
proxy_buffers 4 12k;
proxy_set_header Host fake-domain.xakny.io;
proxy_set_header Host fake-domain.sc-my-uav-260202.io;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";

View File

@@ -6,7 +6,18 @@ cp kube_config_cluster.yml /root/.kube/config
kubectl apply -f k8s-dashboard.yaml
kubectl delete -f k8s-dashboard.yaml
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
kubectl create token admin-user -n kubernetes-dashboard --duration=26280h
kubectl create token read-only-user -n kubernetes-dashboard --duration=26280h

# 1. token 的管控
# 删除旧的绑定(为了保险起见,避免残留)
kubectl delete clusterrolebinding admin-user
# 重新创建绑定
kubectl create clusterrolebinding admin-user \
--clusterrole=cluster-admin \
--serviceaccount=kubernetes-dashboard:admin-user

# 3. 重新生成 token
kubectl create token admin-user -n kubernetes-dashboard --duration=26280h
## 你无法查看已经生成的 Token 列表。
kubectl apply -f k8s-nfs.yaml
kubectl delete -f k8s-nfs.yaml
@@ -16,10 +27,11 @@ kubectl -n kube-system describe pod $(kubectl -n kube-system get pods | grep nfs
kubectl apply -f k8s-nfs-test.yaml
kubectl delete -f k8s-nfs-test.yaml
# 在NFS-Server机器上执行
cd /var/lib/docker/nfs_data
kubectl create ns xakny
kubectl create ns sc-my-uav-260202
kubectl apply -f k8s-pvc.yaml
kubectl delete -f k8s-pvc.yaml
@@ -37,6 +49,9 @@ kubectl delete -f k8s-rabbitmq.yaml
kubectl apply -f k8s-redis.yaml
kubectl delete -f k8s-redis.yaml
kubectl apply -f k8s-influxdb.yaml
kubectl delete -f k8s-influxdb.yaml
kubectl apply -f k8s-mysql.yaml
kubectl delete -f k8s-mysql.yaml

View File

@@ -1,6 +1,6 @@
export harbor_host=192.168.0.2:8033
export harbor_host=192.168.1.4:8033
curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"cmii","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$harbor_host/api/v2.0/projects

View File

@@ -6,7 +6,7 @@ wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/jq-linux-am
chmod +x /usr/local/bin/jq
export name_space=xafkapp
export name_space=xayd
kubectl delete pods -n $name_space --field-selector status.phase!=Running --force

View File

@@ -0,0 +1,862 @@
#!/usr/bin/env bash
#===============================================================================
# 名称: lvm_extend_with_disk.sh
# 描述: 使用新增裸盘扩展指定挂载目录对应的 LVM LVext4 / xfs
# 作者: WDD
# 版本: 1.0.0
# 许可证: MIT
#
# 依赖(命令): bash(>=5.0), coreutils, util-linux, lvm2, gdisk 或 parted,
# findmnt, lsblk, blkid, wipefs, partprobe, udevadm,
# resize2fs(用于ext4), xfs_growfs(用于xfs)
#
# 安全说明:
# - 本脚本会对“新增裸盘”进行 GPT 分区、清空签名(wipefs)、创建 PV 等破坏性操作。
# - 默认会做严格安全检查:若检测到磁盘疑似在使用/含签名/已有分区,将拒绝执行(可用 --force 覆盖部分检查)。
#===============================================================================
set -euo pipefail
IFS=$'\n\t'
#===============================================================================
# Global constants
#===============================================================================
readonly SCRIPT_NAME="$(basename "$0")"
readonly SCRIPT_VERSION="1.0.0"
# Default log level (DEBUG/INFO/WARN/ERROR)
readonly LOG_LEVEL_DEFAULT="INFO"
#===============================================================================
# Global variables (set from CLI arguments)
#===============================================================================
log_level="${LOG_LEVEL_DEFAULT}"
dry_run="0"
force="0"
raw_disk="" # e.g. /dev/sdb
target_mount="" # e.g. /data
# Runtime probe results
fs_type="" # ext4 | xfs
mount_source="" # /dev/mapper/vg-lv or /dev/vg/lv
lv_path="" # canonicalized real LV path
vg_name="" # VG name
new_part="" # newly created partition path: /dev/sdb1 or /dev/nvme0n1p1
# Rollback action stack (executed LIFO by rollback_all)
rollback_stack=()
committed="0" # set to "1" once LV/FS growth is applied; disables auto-rollback
#===============================================================================
# 函数调用关系图ASCII
#===============================================================================
# main
# ├─ parse_args
# ├─ init_traps
# ├─ require_root
# ├─ check_dependencies
# ├─ detect_target_lvm_by_mount
# │ ├─ get_mount_info
# │ ├─ normalize_lv_and_vg
# ├─ validate_raw_disk_safe
# ├─ prepare_gpt_partition_for_lvm
# │ ├─ make_partition_path
# │ ├─ create_gpt_partition (sgdisk|parted)
# ├─ create_and_attach_pv_to_vg
# │ ├─ pvcreate
# │ ├─ vgextend
# ├─ extend_lv_and_grow_fs
# │ ├─ lvextend -r
# ├─ verify_result
# └─ success_exit
#
# on_error (trap ERR)
# └─ rollback_all
#===============================================================================
#===============================================================================
# 日志模块
#===============================================================================
### Emit one log line to stderr (unified entry point for all levels).
# @param level string log level (DEBUG/INFO/WARN/ERROR)
# @param message string log text
# @return 0 success
# @require date, printf
log() {
    local lvl="$1"
    shift
    local msg="$*"
    # > Level filter: DEBUG < INFO < WARN < ERROR
    local threshold current
    threshold="$(log_level_to_int "$log_level")"
    current="$(log_level_to_int "$lvl")"
    if (( current >= threshold )); then
        printf '[%s] [%s] %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$lvl" "$msg" >&2
    fi
    return 0
}
### Map a log level name to its numeric rank (unknown names rank as INFO).
# @param level string level name
# @return 0 success (rank echoed on stdout, no trailing newline)
# @require printf
log_level_to_int() {
    local name="$1"
    local rank=20 # default: INFO
    case "$name" in
        DEBUG) rank=10 ;;
        INFO)  rank=20 ;;
        WARN)  rank=30 ;;
        ERROR) rank=40 ;;
    esac
    printf '%s' "$rank"
}
# Per-level convenience wrappers; all funnel into log().
log_debug(){ log "DEBUG" "$*"; }
log_info(){ log "INFO" "$*"; }
log_warn(){ log "WARN" "$*"; }
log_error(){ log "ERROR" "$*"; }
#===============================================================================
# 工具/执行模块
#===============================================================================
### Run a command, honouring dry-run mode.
# @param cmd string command and its arguments (passed as separate words)
# @return 0 success (always 0 in dry-run), non-zero on command failure
# @require printf, bash
run_cmd() {
    case "$dry_run" in
        1)
            # > In dry-run mode only announce the command; never execute it.
            log_info "[DRY-RUN] $*"
            ;;
        *)
            log_debug "RUN: $*"
            "$@"
            ;;
    esac
}
### Verify that a command is available on PATH.
# @param cmd string command name
# @return 0 present, 1 missing (after logging an error)
# @require command
require_cmd() {
    local cmd="$1"
    if command -v "$cmd" >/dev/null 2>&1; then
        return 0
    fi
    log_error "缺少依赖命令: $cmd"
    return 1
}
### Register a rollback action (a shell command string, later run via bash -c).
# @param action string rollback command
# @return 0 success
# @require none
push_rollback() {
    local step="$1"
    rollback_stack+=("$step")
    log_debug "已登记回滚动作: $step"
}
### Execute every registered rollback action in LIFO order (best effort).
# @return 0 success (individual step failures are logged and ignored)
# @require bash
rollback_all() {
    local total="${#rollback_stack[@]}"
    if [[ "$total" -eq 0 ]]; then
        log_warn "无回滚动作可执行。"
        return 0
    fi
    log_warn "开始回滚(共 ${#rollback_stack[@]} 步)..."
    local idx
    for (( idx = total - 1; idx >= 0; idx-- )); do
        local action="${rollback_stack[$idx]}"
        log_warn "回滚: $action"
        if [[ "$dry_run" == "1" ]]; then
            log_info "[DRY-RUN] 跳过回滚执行"
            continue
        fi
        # > Best effort: one failed rollback step never aborts the remaining ones.
        bash -c "$action" || log_warn "回滚动作执行失败(已忽略): $action"
    done
    log_warn "回滚结束。"
}
#===============================================================================
# Trap/错误处理模块
#===============================================================================
### Install the ERR and EXIT traps.
# ERR -> on_error (rollback unless changes were committed); EXIT -> on_exit (final status message).
# @return 0 success
# @require trap
init_traps() {
trap 'on_error $? $LINENO' ERR
trap 'on_exit $?' EXIT
}
### ERR trap handler: log the failure, then roll back unless changes were committed.
# @param exit_code int exit status that triggered the trap
# @param line_no int line number of the failing command
# @return never returns normally; exits with exit_code
# @require none
on_error() {
local exit_code="$1"
local line_no="$2"
log_error "脚本执行失败exit=$exit_code, line=$line_no)。"
if [[ "${committed}" == "1" ]]; then
# Once the LV/FS has already been grown, undoing would be destructive - skip rollback.
log_error "检测到已提交变更LV/FS 已扩容),为避免破坏性操作:将跳过自动回滚。"
exit "$exit_code"
fi
log_error "将进行回滚..."
rollback_all
exit "$exit_code"
}
### EXIT trap handler: report final status only (never performs rollback).
# @param exit_code int script exit status
# @return 0 success
# @require none
on_exit() {
    local code="$1"
    case "$code" in
        0) log_info "完成:扩展流程已成功结束。" ;;
        *) log_error "退出脚本以非0退出码结束exit=$code)。" ;;
    esac
}
#===============================================================================
# 参数解析与使用说明
#===============================================================================
### Print usage help (heredoc content is user-facing runtime output; kept verbatim).
# @return 0 success
# @require cat
usage() {
cat <<EOF
用法:
$SCRIPT_NAME --disk /dev/sdX --mount /path/to/mount [--force] [--dry-run] [--log-level DEBUG|INFO|WARN|ERROR]
参数:
--disk, -d 需要格式化并用于扩展的裸盘(例如 /dev/sdb, /dev/nvme1n1
--mount, -m 需要扩展的挂载目录(例如 /data
--force, -f 强制执行:当检测到磁盘含签名/已有分区等风险时仍继续(仍会拒绝“已挂载/已作为PV”的磁盘
--dry-run 演练模式:仅打印将执行的操作,不实际改动
--log-level 日志级别(默认 INFO
示例:
$SCRIPT_NAME -d /dev/sdb -m /data
$SCRIPT_NAME -d /dev/nvme1n1 -m /data --force
EOF
}
### Parse command-line arguments into the script's globals.
# @param args string argument list ("$@")
# @return 0 success; exits 2 on usage errors
# @require shift
parse_args() {
    while (( $# > 0 )); do
        case "$1" in
            -d|--disk)
                # > BUGFIX: value-taking options used to crash via `shift 2` under
                # > set -e when the value was missing; fail fast with a clear message.
                if (( $# < 2 )); then log_error "选项 $1 缺少参数"; usage; exit 2; fi
                raw_disk="$2"; shift 2
                ;;
            -m|--mount)
                if (( $# < 2 )); then log_error "选项 $1 缺少参数"; usage; exit 2; fi
                target_mount="$2"; shift 2
                ;;
            -f|--force) force="1"; shift ;;
            --dry-run) dry_run="1"; shift ;;
            --log-level)
                if (( $# < 2 )); then log_error "选项 $1 缺少参数"; usage; exit 2; fi
                log_level="$2"; shift 2
                ;;
            -h|--help) usage; exit 0 ;;
            *)
                log_error "未知参数: $1"
                usage
                exit 2
                ;;
        esac
    done
    # Both the raw disk and the target mount directory are mandatory.
    if [[ -z "$raw_disk" || -z "$target_mount" ]]; then
        log_error "必须提供 --disk 与 --mount"
        usage
        exit 2
    fi
}
#===============================================================================
# 前置检查模块
#===============================================================================
### Ensure the script runs as root (partitioning/LVM require it).
# @return 0 running as root, 1 otherwise
# @require id
require_root() {
    local uid
    uid="$(id -u)"
    if [[ "$uid" != "0" ]]; then
        log_error "必须以 root 运行(需要分区/LVM 操作权限)。"
        return 1
    fi
    return 0
}
### Verify every external command the script relies on.
# @return 0 success, non-zero on first missing dependency
# @require command
check_dependencies() {
    local tool
    # > Base system utilities.
    for tool in findmnt lsblk blkid wipefs partprobe udevadm; do
        require_cmd "$tool"
    done
    # > LVM toolchain.
    for tool in pvs vgs lvs pvcreate pvremove vgextend vgreduce lvextend; do
        require_cmd "$tool"
    done
    # > Partitioning: prefer sgdisk (gdisk suite); fall back to parted.
    if ! command -v sgdisk >/dev/null 2>&1; then
        require_cmd parted
    fi
    # > Filesystem growth tools (resize2fs/xfs_growfs) are validated later,
    # > once fs_type has been probed.
}
#===============================================================================
# 目标挂载点探测模块
#===============================================================================
### Probe the mount point: fills the mount_source and fs_type globals.
# @param mount_path string mount directory
# @return 0 success, 1 on missing mount or unsupported filesystem
# @require findmnt
get_mount_info() {
    local mount_path="$1"
    if ! findmnt -T "$mount_path" >/dev/null 2>&1; then
        log_error "挂载目录不存在或未挂载: $mount_path"
        return 1
    fi
    mount_source="$(findmnt -nr -T "$mount_path" -o SOURCE)"
    fs_type="$(findmnt -nr -T "$mount_path" -o FSTYPE)"
    if [[ -z "$mount_source" || -z "$fs_type" ]]; then
        log_error "无法获取挂载信息: mount=$mount_path"
        return 1
    fi
    # > Online growth is only supported for ext4 and xfs.
    if [[ "$fs_type" != "ext4" && "$fs_type" != "xfs" ]]; then
        log_error "仅支持 ext4 或 xfs当前检测到: $fs_type"
        return 1
    fi
    return 0
}
### Fetch a block device's MAJ:MIN pair (e.g. "253:0").
# @param dev string block device path
# @return 0 success (pair echoed on stdout), 1 on failure
# @require lsblk, printf
get_maj_min() {
    local device="$1"
    local majmin
    majmin="$(lsblk -dn -o MAJ:MIN "$device" 2>/dev/null | awk '{$1=$1;print}')"
    if [[ -z "$majmin" ]]; then
        log_error "无法获取 MAJ:MIN: dev=$device"
        return 1
    fi
    printf '%s' "$majmin"
}
### Resolve the LV/VG backing a device by scanning `lvs` output.
### Strategy A: match lv_dm_path; B: match kernel MAJ:MIN; C: lsblk VG/LV columns.
# @param src_dev string resolved mount device (e.g. /dev/mapper/vg-lv or /dev/dm-0)
# @param real_dev string physical device after readlink -f (e.g. /dev/dm-0)
# @return 0 success (echoes "lv_path|vg_name"), 1 when nothing matched
# @require lvs, awk, readlink, lsblk
lookup_lv_vg_from_lvs() {
local src_dev="$1"
local real_dev="$2"
local mm
mm="$(get_maj_min "$real_dev")"
# A) When lvs provides lv_dm_path, match on it directly (most reliable).
# > --select is deliberately avoided: its syntax varies across lvm2 versions.
# NOTE(review): `out` (and `out2` below) are assigned without `local`, so they
# leak into the caller's scope - confirm nothing depends on that before tightening.
if out="$(lvs --noheadings --separator '|' -o lv_path,vg_name,lv_dm_path 2>/dev/null)"; then
local hit
hit="$(
echo "$out" | awk -F'|' -v s="$src_dev" -v r="$real_dev" '
function trim(x){gsub(/^[ \t]+|[ \t]+$/,"",x); return x}
{
lv=trim($1); vg=trim($2); dm=trim($3)
if(lv=="" || vg=="") next
# 直接匹配 dm_path / src_dev / real_dev
if(dm==s || dm==r) {print lv "|" vg; exit}
}
'
)"
if [[ -n "$hit" ]]; then
printf '%s' "$hit"
return 0
fi
# Relax the match: compare readlink -f(dm_path) against real_dev
# (handles dm_path being a /dev/mapper/* symlink).
# NOTE(review): this while loop runs in a command-substitution subshell, so the
# `local` declarations below only affect the subshell copy of this function.
hit="$(
echo "$out" | while IFS= read -r line; do
[[ -z "$line" ]] && continue
local lv vg dm dm_real
lv="$(echo "$line" | awk -F'|' '{gsub(/^[ \t]+|[ \t]+$/,"",$1);print $1}')"
vg="$(echo "$line" | awk -F'|' '{gsub(/^[ \t]+|[ \t]+$/,"",$2);print $2}')"
dm="$(echo "$line" | awk -F'|' '{gsub(/^[ \t]+|[ \t]+$/,"",$3);print $3}')"
[[ -z "$lv" || -z "$vg" || -z "$dm" ]] && continue
dm_real="$(readlink -f "$dm" 2>/dev/null || true)"
if [[ "$dm_real" == "$real_dev" ]]; then
echo "${lv}|${vg}"
break
fi
done
)"
if [[ -n "$hit" ]]; then
printf '%s' "$hit"
return 0
fi
fi
# B) Fallback: match lv_kernel_major/minor against the device's MAJ:MIN
# (most portable across lvm2 versions).
# > Column names may differ on exotic builds, but most lvm2 releases support both fields.
if out2="$(lvs --noheadings --separator '|' -o lv_path,vg_name,lv_kernel_major,lv_kernel_minor 2>/dev/null)"; then
local major minor
major="${mm%%:*}"
minor="${mm##*:}"
local hit2
hit2="$(
echo "$out2" | awk -F'|' -v mj="$major" -v mn="$minor" '
function trim(x){gsub(/^[ \t]+|[ \t]+$/,"",x); return x}
{
lv=trim($1); vg=trim($2); kmj=trim($3); kmn=trim($4)
if(lv=="" || vg=="") next
if(kmj==mj && kmn==mn) {print lv "|" vg; exit}
}
'
)"
if [[ -n "$hit2" ]]; then
printf '%s' "$hit2"
return 0
fi
fi
# C) Last resort: read VG/LV columns straight from lsblk (some util-linux builds provide them).
# NOTE(review): `vg` / `lvname` here are not declared local and shadow nothing visible - verify.
if vg="$(lsblk -dn -o VG "$real_dev" 2>/dev/null | awk '{$1=$1;print}')" \
&& lvname="$(lsblk -dn -o LV "$real_dev" 2>/dev/null | awk '{$1=$1;print}')"; then
if [[ -n "$vg" && -n "$lvname" ]]; then
printf '/dev/%s/%s|%s' "$vg" "$lvname" "$vg"
return 0
fi
fi
return 1
}
### Resolve findmnt's SOURCE field to a usable block-device path
### (accepts /dev/xxx, UUID=..., LABEL=..., or a bare name under /dev).
# @param source string mount source (/dev/xxx, UUID=..., LABEL=...)
# @return 0 success (echoes /dev/xxx), 1 on failure
# @require blkid, printf
resolve_mount_source_device() {
    local src="$1"
    local dev=""
    if [[ "$src" == /dev/* ]]; then
        dev="$src"
    elif [[ "$src" == UUID=* || "$src" == LABEL=* ]]; then
        # > Key step: translate UUID=xxxx / LABEL=xxx into a /dev path.
        dev="$(blkid -o device -t "$src" 2>/dev/null | head -n1 || true)"
    elif [[ -e "/dev/$src" ]]; then
        # Occasionally SOURCE is a bare mapper name; complete the path.
        dev="/dev/$src"
    fi
    if [[ -z "$dev" || ! -e "$dev" ]]; then
        log_error "无法将挂载源解析为设备路径: source=$src"
        return 1
    fi
    if [[ ! -b "$dev" ]]; then
        log_error "解析后的路径不是块设备: dev=$dev (from source=$src)"
        return 1
    fi
    printf '%s' "$dev"
}
### Normalize the mount SOURCE into a concrete LV path and VG name
### (robust version: avoids lvs --select compatibility pitfalls).
# @param source string mount source (findmnt SOURCE)
# @return 0 success (globals lv_path / vg_name populated), 1 on failure
# @require readlink, lvs, awk, lsblk, blkid
normalize_lv_and_vg() {
    local source="$1"
    local src_dev real_dev out
    src_dev="$(resolve_mount_source_device "$source")"
    real_dev="$(readlink -f "$src_dev")"
    if [[ -z "$real_dev" || ! -b "$real_dev" ]]; then
        log_error "无法解析真实设备路径: src_dev=$src_dev real_dev=$real_dev"
        return 1
    fi
    log_debug "mount source resolve: source=$source src_dev=$src_dev real_dev=$real_dev"
    if ! out="$(lookup_lv_vg_from_lvs "$src_dev" "$real_dev")"; then
        log_error "无法从挂载源反查 LVM LV/VGsource=$source src_dev=$src_dev real_dev=$real_dev"
        # > BUGFIX: the backquoted command below sat inside double quotes, so bash
        # > executed `lvs -a -o ...` via command substitution while building the log
        # > message. Escaping the backticks keeps the hint literal.
        log_error "诊断建议:执行 \`lvs -a -o lv_path,vg_name,lv_dm_path,lv_kernel_major,lv_kernel_minor\` 查看映射。"
        return 1
    fi
    lv_path="${out%%|*}"
    vg_name="${out##*|}"
    if [[ -z "$lv_path" || -z "$vg_name" ]]; then
        log_error "解析 LV/VG 失败: out=$out"
        return 1
    fi
    # Some environments report /dev/vg/lv paths that do not exist on disk;
    # do not fail here - lvextend will produce a clearer error later.
    if [[ ! -e "$lv_path" ]]; then
        log_warn "lvs 返回的 lv_path 不存在,尝试 readlink/替代路径: lv_path=$lv_path"
    fi
    log_info "已解析目标: vg=$vg_name lv=$lv_path (from source=$source)"
}
### Full probe of the target LVM stack behind the mount point.
# @return 0 success
# @require none
detect_target_lvm_by_mount() {
    get_mount_info "$target_mount"
    normalize_lv_and_vg "$mount_source"
    # > Only now is the filesystem type known, so validate the grow tool here.
    case "$fs_type" in
        ext4) require_cmd resize2fs ;;
        *)    require_cmd xfs_growfs ;;
    esac
    log_info "目标挂载点: $target_mount"
    log_info "挂载源设备: $mount_source"
    log_info "文件系统类型: $fs_type"
    log_info "LV 路径: $lv_path"
    log_info "VG 名称: $vg_name"
}
#===============================================================================
# 新增裸盘安全检查模块
#===============================================================================
### Validate that the raw disk is safe to use destructively.
# @param disk string raw disk path (/dev/sdX or /dev/nvmeXnY)
# @return 0 safe to proceed, 1 rejected
# @require lsblk, pvs, wipefs
validate_raw_disk_safe() {
local disk="$1"
if [[ ! -b "$disk" ]]; then
log_error "指定磁盘不是块设备: $disk"
return 1
fi
# Must be a whole disk, not a partition/LV/loop device
local dtype
dtype="$(lsblk -dn -o TYPE "$disk" 2>/dev/null || true)"
if [[ "$dtype" != "disk" ]]; then
log_error "指定设备不是磁盘(disk)类型: $disk (type=$dtype)"
return 1
fi
# Must not already be an LVM PV
if pvs --noheadings -o pv_name 2>/dev/null | awk '{$1=$1;print}' | grep -Fxq "$disk"; then
log_error "磁盘已是 LVM PV拒绝操作: $disk"
return 1
fi
# The disk and all of its partitions must be unmounted
local mps
mps="$(lsblk -nr -o MOUNTPOINT "$disk" 2>/dev/null | awk 'NF{print}')"
if [[ -n "$mps" ]]; then
log_error "检测到磁盘或其分区已挂载,拒绝操作: $disk"
log_error "挂载点: $mps"
return 1
fi
# Existing child partitions or on-disk signatures are rejected by default
# (--force relaxes only these two checks).
local has_children has_sig
has_children="$(lsblk -nr "$disk" -o NAME | awk 'NR>1{print}' | wc -l | awk '{$1=$1;print}')"
has_sig="0"
# NOTE(review): this treats "wipefs -n prints 2+ lines" as "has signatures".
# wipefs output format differs between util-linux versions (newer builds omit the
# header), so a single-signature disk may be missed - verify on the target OS.
if wipefs -n "$disk" | awk 'NR>1{exit 0} END{exit 1}'; then
has_sig="1"
fi
if [[ "$force" != "1" ]]; then
if [[ "$has_children" != "0" ]]; then
log_error "磁盘存在分区/子设备(默认拒绝)。可加 --force 继续: $disk"
return 1
fi
if [[ "$has_sig" == "1" ]]; then
log_error "磁盘存在文件系统/签名(默认拒绝)。可加 --force 继续: $disk"
return 1
fi
else
# Even with --force, refuse when any partition of this disk is already a PV
if pvs --noheadings -o pv_name 2>/dev/null | awk '{$1=$1;print}' | grep -q "^${disk}"; then
log_error "检测到磁盘相关分区已是 PV即使 --force 也拒绝): $disk"
return 1
fi
fi
log_info "新增裸盘安全检查通过: $disk"
}
#===============================================================================
# 分区与 PV/VG/LV 扩展模块
#===============================================================================
### Build the first-partition path for a disk (nvme/mmc naming aware).
# @param disk string disk device path
# @return 0 success (partition path echoed on stdout)
# @require printf
make_partition_path() {
    local device="$1"
    # > Devices whose names end in a digit (nvme0n1, mmcblk0) need a 'p' separator.
    case "$device" in
        *[0-9]) printf '%sp1' "$device" ;;
        *)      printf '%s1' "$device" ;;
    esac
}
### Create a GPT label plus a single whole-disk LVM-typed partition.
# Destructive: wipes signatures/partition tables on the (validated) raw disk.
# @param disk string raw disk
# @return 0 success, 1 when the new partition node does not appear
# @require sgdisk or parted, partprobe, udevadm, wipefs
prepare_gpt_partition_for_lvm() {
local disk="$1"
new_part="$(make_partition_path "$disk")"
# Rollback: best effort to return the disk to a no-table/no-signature state.
# > Destructive rollback - acceptable only because this is a fresh raw disk.
push_rollback "wipefs -a '$disk' || true"
if command -v sgdisk >/dev/null 2>&1; then
push_rollback "sgdisk --zap-all '$disk' || true"
else
# parted has no zap-all; zero the first 10MiB with dd as a best effort.
push_rollback "dd if=/dev/zero of='$disk' bs=1M count=10 conv=fsync || true"
fi
log_info "开始为裸盘创建 GPT + LVM 分区: disk=$disk part=$new_part"
# > Key step: clear signatures before partitioning.
run_cmd wipefs -a "$disk"
if command -v sgdisk >/dev/null 2>&1; then
# > Key step: zap any old table, then write a fresh GPT.
run_cmd sgdisk --zap-all "$disk"
run_cmd sgdisk -og "$disk"
# > Key step: partition 1 spans the whole disk, typecode 8e00 (Linux LVM).
run_cmd sgdisk -n 1:0:0 -t 1:8e00 -c 1:"lvm-pv" "$disk"
else
# parted fallback
# > Key step: GPT label + single partition + LVM flag.
run_cmd parted -s "$disk" mklabel gpt
run_cmd parted -s "$disk" mkpart primary 1MiB 100%
run_cmd parted -s "$disk" set 1 lvm on
fi
run_cmd partprobe "$disk"
run_cmd udevadm settle
if [[ ! -b "$new_part" ]]; then
log_error "分区设备未出现(可能需要稍等或内核未识别): $new_part"
return 1
fi
log_info "分区创建完成: $new_part"
}
### Create a PV on the new partition and extend it into the target VG.
# @param part string new partition path
# @param vg string target VG name
# @return 0 success, 1 when the partition is already a PV
# @require pvcreate, vgextend, vgreduce, pvremove
create_and_attach_pv_to_vg() {
local part="$1"
local vg="$2"
# Guard against misuse: the fresh partition must not already be a PV.
if pvs --noheadings -o pv_name 2>/dev/null | awk '{$1=$1;print}' | grep -Fxq "$part"; then
log_error "分区已是 PV拒绝重复操作: $part"
return 1
fi
log_info "创建 PV: $part"
run_cmd pvcreate -y "$part"
push_rollback "pvremove -ff -y '$part' || true"
log_info "扩展 VG: vgextend $vg $part"
run_cmd vgextend "$vg" "$part"
push_rollback "vgreduce '$vg' '$part' || true"
}
### Grow the LV by all free VG space; -r also grows the filesystem in one step.
# @param lv string LV path
# @return 0 success
# @require lvextend
extend_lv_and_grow_fs() {
    local target_lv="$1"
    log_info "扩展 LV 并自动扩容文件系统: lvextend -l +100%FREE -r $target_lv"
    run_cmd lvextend -l +100%FREE -r "$target_lv"
    # > Key step: from here the on-disk change is committed; automatic rollback
    # > must never run again (see on_error).
    committed="1"
    log_info "已提交变更LV/FS 扩容已完成),后续将禁止自动回滚。"
}
#===============================================================================
# 结果校验模块
#===============================================================================
### Verify the extension result (fixed PV->VG membership false negatives).
# @param mount_path string mount directory
# @param lv string LV path
# @param part string newly added PV partition
# @param vg string VG name
# @return 0 verified, 1 on any check failure
# @require df, lvs, vgs, pvs, awk, readlink
verify_result() {
local mount_path="$1"
local lv="$2"
local part="$3"
local vg="$4"
log_info "开始校验扩展结果..."
# 1) Is the new PV a member of the VG? (structured parse + path normalization)
local part_real
part_real="$(readlink -f "$part" 2>/dev/null || true)"
[[ -z "$part_real" ]] && part_real="$part"
local found="0"
while IFS='|' read -r pv_name pv_vg; do
pv_name="$(awk '{$1=$1;print}' <<<"${pv_name:-}")"
pv_vg="$(awk '{$1=$1;print}' <<<"${pv_vg:-}")"
[[ -z "$pv_name" || -z "$pv_vg" ]] && continue
local pv_real
pv_real="$(readlink -f "$pv_name" 2>/dev/null || true)"
[[ -z "$pv_real" ]] && pv_real="$pv_name"
if [[ "$pv_real" == "$part_real" && "$pv_vg" == "$vg" ]]; then
found="1"
break
fi
done < <(pvs --noheadings --separator '|' -o pv_name,vg_name 2>/dev/null || true)
if [[ "$found" != "1" ]]; then
log_error "校验失败:新增 PV 未正确加入 VG: part=$part (real=$part_real) vg=$vg"
log_error "诊断信息pvs -o pv_name,vg_name 输出如下(供排查):"
if [[ "$dry_run" == "1" ]]; then
log_info "[DRY-RUN] 跳过诊断输出"
else
pvs --noheadings --separator '|' -o pv_name,vg_name >&2 || true
fi
return 1
fi
# 2) Is the LV still queryable?
if ! lvs --noheadings -o lv_path 2>/dev/null | awk '{$1=$1;print}' | grep -Fxq "$lv"; then
log_error "校验失败:无法查询到 LV: $lv"
return 1
fi
# 3) Human-readable df output for the operator
log_info "df -h$mount_path"
if [[ "$dry_run" == "1" ]]; then
log_info "[DRY-RUN] 跳过 df -h"
else
df -h "$mount_path" >&2
fi
log_info "校验通过。"
}
#===============================================================================
# 主流程
#===============================================================================
### Main workflow: probe target, validate disk, partition, attach PV, grow LV/FS.
# @return 0 success
# @require none
main() {
parse_args "$@"
init_traps
require_root
check_dependencies
detect_target_lvm_by_mount
validate_raw_disk_safe "$raw_disk"
prepare_gpt_partition_for_lvm "$raw_disk"
create_and_attach_pv_to_vg "$new_part" "$vg_name"
extend_lv_and_grow_fs "$lv_path"
if ! verify_result "$target_mount" "$lv_path" "$new_part" "$vg_name"; then
# Growth itself already succeeded; verification problems only warrant manual review.
log_warn "校验未通过但扩容步骤已完成。请人工复核pvs/vgs/lvs/df。"
exit 0
fi
# On success clear the rollback stack (rollback only ever runs from the ERR trap).
rollback_stack=()
log_info "扩展成功mount=$target_mount lv=$lv_path vg=$vg_name new_pv=$new_part"
}
main "$@"
# pvs -o pv_name,vg_name,pv_size,pv_free
# vgs datavg -o vg_name,vg_size,vg_free
# lvs datavg/lvdata -o lv_path,lv_size,seg_count
# df -hT /var/lib/docker
# 综合目的是为了支持linux环境下新增磁盘逻辑卷的拓展请实现如下的功能
# 1. 参数变量
# 1. 设置需要格式化的裸盘名称
# 2. 设置需要扩展的磁盘目录
# 2. 实际脚本
# 1. 检查脚本需要使用的依赖
# 2. 根据需要扩展的磁盘目录检测到相应的PV LV名称磁盘格式为ext4或者XFS
# 3. 需要将新格式化的裸盘进行GPT格式的修改PV创建然后将PV扩展至需要扩展的磁盘目录的LV逻辑卷中
# 4. 检查实际的扩展是否成功,如果失败,恢复新格式化的裸盘清除的功能,恢复至原本状态

View File

@@ -0,0 +1,17 @@
请以Bash Shell脚本高级开发工程师的身份严格遵循以下编程规范实现指定功能
1. 代码结构规范
- 符合POSIX标准与Bash最佳实践v5.0+
- 实现清晰的模块划分和函数封装
- 采用防御性编程策略处理异常情况
- 包含完善的错误处理机制trap、set -euo pipefail
2. 函数设计标准
- 函数声明需包含功能描述段使用###注释块)、参数说明(@param <变量名> <数据类型> <用途说明>)、返回值说明(@return <退出码> <状态描述>)、环境依赖(@require <依赖项>
- 函数参数命名采用snake_case格式体现语义化特征
3. 文档规范
- 主脚本头部包含: 元数据声明(作者、版本、许可证) 全局常量定义区 模块依赖说明
- 关键算法步骤添加行内注释(# > 开头)
- 维护完整的函数调用关系图使用ASCII流程图
4. 质量保障
- 通过ShellCheck进行静态检测
- 统一的日志函数实现详细的日志分级输出DEBUG/INFO/WARN/ERROR