Add the Xiong'an 空能院 project

zeaslity
2025-08-12 09:59:32 +08:00
parent ce4165e36b
commit 4b274a02c8
79 changed files with 16048 additions and 211 deletions

View File

@@ -0,0 +1,112 @@
#!/bin/bash
set -eo pipefail
# Script parameters
DOCKER_VERSION="20.10" # set the desired Docker version here
UBUNTU_IDS=("18.04" "20.04" "22.04" "24.04")
ALIYUN_MIRROR="https://mirrors.aliyun.com"
DOCKER_COMPOSE_VERSION="2.26.1"
# 1. Check for an Ubuntu environment
check_ubuntu() {
if ! command -v lsb_release &> /dev/null || [[ $(lsb_release -is) != "Ubuntu" ]]; then
echo "错误本脚本仅支持Ubuntu系统"
exit 1
fi
local version_id=$(lsb_release -rs)
if [[ ! " ${UBUNTU_IDS[*]} " =~ " ${version_id} " ]]; then
echo "错误不支持的Ubuntu版本 ${version_id},支持版本:${UBUNTU_IDS[*]}"
exit 1
fi
}
# 2. Switch APT sources to the Aliyun mirror
set_aliyun_mirror() {
sudo sed -i "s/archive.ubuntu.com/mirrors.aliyun.com/g" /etc/apt/sources.list
sudo sed -i "s/security.ubuntu.com/mirrors.aliyun.com/g" /etc/apt/sources.list
sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates
}
# 3. Set up the Docker APT repository
prepare_docker_env() {
sudo mkdir -p /etc/apt/keyrings
curl -fsSL $ALIYUN_MIRROR/docker-ce/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
local codename=$(lsb_release -cs)
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] $ALIYUN_MIRROR/docker-ce/linux/ubuntu $codename stable" | \
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
}
# 4. Resolve the target Docker package version
get_docker_version() {
local target_version=""
if [[ $DOCKER_VERSION =~ ^[0-9]+\.[0-9]+$ ]]; then
# For a major.minor version, pick the highest available patch release
target_version=$(apt-cache madison docker-ce \
| awk -F'|' '{gsub(/ /,"",$2); print $2}' \
| grep -E "^[0-9]+:${DOCKER_VERSION}([.-]|\~\w+)" \
| sort -rV \
| head -1)
elif [[ $DOCKER_VERSION =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
# Exact version match
target_version=$(apt-cache madison docker-ce \
| awk -F'|' '{gsub(/ /,"",$2); print $2}' \
| grep -E "^[0-9]+:${DOCKER_VERSION}.*$(lsb_release -cs)" )
fi
[ -z "$target_version" ] && echo "错误找不到Docker版本 $DOCKER_VERSION" && exit 1
echo "$target_version" | sed 's/^[0-9]+://' # 去除前缀
}
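# Illustration of what the resolver chooses from (a hedged example; the actual
# version strings depend on the mirror and the Ubuntu release):
#   apt-cache madison docker-ce | awk -F'|' '{gsub(/ /,"",$2); print $2}' | sort -rV | head -3
#   5:20.10.24~3-0~ubuntu-jammy
#   5:20.10.23~3-0~ubuntu-jammy
#   5:20.10.22~3-0~ubuntu-jammy
# With DOCKER_VERSION="20.10" the first (highest) matching entry is kept and passed
# unchanged to apt-get install below.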
# 5. Main flow
main() {
check_ubuntu
echo "-- 设置阿里云源 --"
set_aliyun_mirror
echo "-- 准备Docker仓库 --"
prepare_docker_env
echo "-- 解析Docker版本 --"
local full_version=$(get_docker_version)
echo "选择版本:$full_version"
echo "-- 安装组件 --"
sudo apt-get install -y \
docker-ce-cli="$full_version" \
docker-ce="$full_version" \
docker-ce-rootless-extras="$full_version" \
containerd.io \
docker-buildx-plugin \
docker-compose-plugin
echo "-- 安装docker-compose --"
sudo curl -sSL "https://get.daocloud.io/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m`" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
echo "-- 禁用自动更新 --"
sudo apt-mark hold docker-ce docker-ce-cli containerd.io
echo "-- 启动服务 --"
sudo systemctl enable docker && sudo systemctl start docker
echo -e "\n=== 安装完成 ==="
docker --version
docker-compose --version
}
main
# Original requirements: write a shell script based on the install logic above that does the following.
# A variable at the top of the script sets the Docker version, e.g. 20.10.15, or 20.10 (install the highest patch release).
# 1. Check whether the current host runs Ubuntu; the script only supports Ubuntu.
# 2. Read the local release number; Ubuntu 18.04, 20.04, 22.04 and 24.04 are supported.
# 3. Switch the apt mirror to the Aliyun source according to the Ubuntu version.
# 4. Install the Docker version given by the variable online, install docker-compose online, and install the common plugins.
# 5. Prevent Docker from updating automatically.
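# Usage (a minimal sketch; the filename install-docker.sh is an assumption, not part of the repo):
#   edit DOCKER_VERSION at the top of this file, then run:
#     sudo bash install-docker.sh
#   verify afterwards with: docker --version && docker compose version && docker-compose --version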

View File

@@ -28,30 +28,16 @@ cat /usr/local/etc/wdd/agent-wdd-config.yaml
/usr/local/bin/agent-wdd base harbor install
# Run on the master node
# Install octopus-agent
mv agent-wdd_linux_amd64 /usr/local/bin/agent-wdd
chmod +x /usr/local/bin/agent-wdd
# Install the ssh key on the master node
/usr/local/bin/agent-wdd base ssh config
/usr/local/bin/agent-wdd base ssh key
# Run commands across hosts in batch
host_list=(
172.16.100.56
172.16.100.57
172.16.100.58
)
host_list=(
172.16.100.62
172.16.100.51
172.16.100.52
172.16.100.53
172.16.100.54
172.16.100.55
172.16.100.56
172.16.100.57
172.16.100.58
172.16.100.59
172.16.100.60
172.16.100.61
)
host_list=(
172.16.100.56
172.16.100.57
@@ -63,6 +49,9 @@ for server in "${host_list[@]}";do
echo ""
done
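# Hypothetical sketch of a typical loop body (the real body is elided in this diff;
# the commands below are assumptions used for illustration only):
# for server in "${host_list[@]}"; do
#   scp /usr/local/bin/agent-wdd root@${server}:/usr/local/bin/agent-wdd
#   ssh root@${server} "chmod +x /usr/local/bin/agent-wdd && /usr/local/bin/agent-wdd info all"
#   echo ""
# done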
# Install keys to all hosts in batch from the master node
# Copy and sync files
export server=172.16.100.62
@@ -70,6 +59,14 @@ scp /usr/local/bin/agent-wdd root@${server}:/usr/local/bin/agent-wdd
ssh root@${server} "/usr/local/bin/agent-wdd base ssh config"
ssh root@${server} "/usr/local/bin/agent-wdd base ssh key"
# Install docker-compose
mv docker-compose-linux-x86_64 /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
# ssh root@${server} "/usr/local/bin/agent-wdd base tools"
# Disk initialization
ssh root@${server} "mkdir /root/wdd"
scp /root/wdd/disk.sh root@${server}:/root/wdd/
@@ -83,7 +80,7 @@ scp /root/wdd/docker-compose-v2.18.0-linux-amd64 root@${server}:/root/wdd/
ssh root@${server} "/usr/local/bin/agent-wdd info all"
ssh root@${server} "cat /usr/local/etc/wdd/agent-wdd-config.yaml"
# ssh root@${server} "/usr/local/bin/agent-wdd base tools"
ssh root@${server} "/usr/local/bin/agent-wdd base swap"
ssh root@${server} "/usr/local/bin/agent-wdd base firewall"
@@ -102,4 +99,21 @@ ssh root@${server} "cat /etc/docker/daemon.json"
ssh root@${server} "systemctl restart docker"
ssh root@${server} "docker info"
wget https://oss.demo.uavcmlc.com/cmlc-installation/tmp/nginx=1.27.0=2025-03-11=402.tar.gz && docker load < nginx=1.27.0=2025-03-11=402.tar.gz && docker run -it --rm harbor.cdcyy.com.cn/cmii/nginx:1.27.0
wget https://oss.demo.uavcmlc.com/cmlc-installation/tmp/nginx=1.27.0=2025-03-11=402.tar.gz && docker load < nginx=1.27.0=2025-03-11=402.tar.gz && docker run -it --rm harbor.cdcyy.com.cn/cmii/nginx:1.27.0
# Run on the master node: install the Harbor registry
/usr/local/bin/agent-wdd base harbor install
# Install rke and kubectl
mv /root/wdd/rke_amd64 /usr/local/bin/rke
chmod +x /usr/local/bin/rke
mv /root/wdd/kubectl /usr/local/bin/kubectl
chmod +x /usr/local/bin/kubectl
# Install the k8s certificate
curl -s https://172.29.137.125
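# A hedged way to inspect the certificate served by that endpoint (assumes openssl is installed):
echo | openssl s_client -connect 172.29.137.125:443 2>/dev/null | openssl x509 -noout -subject -issuer -dates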

View File

@@ -1,7 +1,7 @@
#!/bin/bash
#nfs_data_path="/var/lib/docker/nfs_data"
nfs_data_path="/data/nfs_data"
nfs_data_path="/var/lib/docker/nfs_data"
#nfs_data_path="/data/nfs_data"
deploy_nfs_server(){

View File

@@ -1,8 +1,9 @@
upstream proxy_server {
ip_hash;
server 172.16.100.55:30500;
server 172.16.100.59:30500;
server 172.16.100.60:30500;
server 192.168.0.2:30500;
server 192.168.0.4:30500;
server 192.168.0.5:30500;
server 192.168.0.6:30500;
}
server {
@@ -21,7 +22,7 @@ server {
proxy_buffering off;
proxy_buffer_size 4k;
proxy_buffers 4 12k;
proxy_set_header Host fake-domain.eedsjc-uavms.io;
proxy_set_header Host fake-domain.xakny.io;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";

View File

@@ -0,0 +1,50 @@
export tenant_name=outside
export inner_master_ip=MASTER_NODE_INTERNAL_IP   # replace with the master node's internal IP
export minio_host_ip=MINIO_HOST_INTERNAL_IP      # replace with the MinIO host's internal IP
mc alias set ${tenant_name} http://${minio_host_ip}:9000 cmii B#923fC7mk
mc mb ${tenant_name}/jadenq ${tenant_name}/tus ${tenant_name}/thumbnail ${tenant_name}/pub-cms ${tenant_name}/live-srs-hls/ ${tenant_name}/mission/ ${tenant_name}/surveillance ${tenant_name}/playback ${tenant_name}/tower ${tenant_name}/modelprocess ${tenant_name}/srs-hls ${tenant_name}/live-cluster-hls ${tenant_name}/geodata ${tenant_name}/ilm-detect ${tenant_name}/ilm-geodata
echo ""
echo "set rabbit mq"
mc admin config set ${tenant_name} notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@${inner_master_ip}:35672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"
echo ""
echo "sleep 5 s!"
sleep 5
mc admin service restart ${tenant_name}
echo "sleep 5 s!"
sleep 5
echo ""
echo "start to add event notification !"
mc event add ${tenant_name}/mission arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/modelprocess arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/live-srs-hls arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/playback arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/live-cluster-hls arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/geodata arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/surveillance arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/ilm-detect arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/ilm-geodata arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/tus arn:minio:sqs::1:amqp --event delete
mc ilm add --expiry-days "1" ${tenant_name}/tus
echo ""
echo "done of init !"

View File

@@ -18,6 +18,9 @@ kubectl delete -f k8s-nfs-test.yaml
cd /var/lib/docker/nfs_data
kubectl create ns xakny
kubectl apply -f k8s-pvc.yaml
kubectl delete -f k8s-pvc.yaml
@@ -37,13 +40,33 @@ kubectl delete -f k8s-redis.yaml
kubectl apply -f k8s-mysql.yaml
kubectl delete -f k8s-mysql.yaml
----
Doris deployment
---
kubectl apply -f doris-pvc.yaml
kubectl apply -f doris-fe-configmap.yaml
kubectl apply -f doris-be-configmap.yaml
kubectl apply -f doris-be-internal-service.yaml
kubectl apply -f doris-be-service.yaml
kubectl apply -f doris-fe-internal-service.yaml
kubectl apply -f doris-fe-service.yaml
kubectl apply -f doris-fe-statusfulset.yaml
kubectl delete -f doris-fe-statusfulset.yaml
kubectl apply -f doris-be-statusfulset.yaml
kubectl delete -f doris-be-statusfulset.yaml
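# Hedged check that the Doris pods come up (the xakny namespace and the statefulset
# names are assumed to follow the yaml filenames above):
kubectl -n xakny get statefulset
kubectl -n xakny get pods -o wide | grep doris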
---
Database initialization
---
kubectl apply -f k8s-nacos.yaml
kubectl delete -f k8s-nacos.yaml
---
vim k8s-configmap.yaml
kubectl apply -f k8s-configmap.yaml
kubectl delete -f k8s-configmap.yaml

View File

@@ -1,6 +1,6 @@
export harbor_host=172.16.100.55:8033
export harbor_host=192.168.0.2:8033
curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"cmii","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$harbor_host/api/v2.0/projects
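# Hedged check that the project now exists (GET on the same Harbor v2.0 API):
curl -s -u "admin:V2ryStr@ngPss" "http://$harbor_host/api/v2.0/projects?name=cmii"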

View File

@@ -8,7 +8,7 @@ env:
value: "eth0"
# Even safer: set it explicitly with kubectl
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=ens18
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=eth0
# Delete all calico pods
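# Hedged example of the restart (k8s-app=calico-node is Calico's default label):
kubectl -n kube-system delete pod -l k8s-app=calico-node
kubectl -n kube-system get pods -l k8s-app=calico-node -o wide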

View File

@@ -5,7 +5,7 @@ gzip_image_list_txt="all-gzip-image-list.txt" # usually does not need to be changed
oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
local_gzip_path="/root/octopus-image"
DockerRegisterDomain="172.16.100.55:8033" # 需要根据实际修改
DockerRegisterDomain="192.168.0.2:8033" # 需要根据实际修改
HarborAdminPass=V2ryStr@ngPss # 需要跟第一脚本中的密码保持一致
print_green() {
@@ -116,9 +116,9 @@ Load_Tag_Upload(){
shift # past argument
;;
cmii)
local_gzip_path="$local_gzip_path/uavms-2.0"
local_gzip_path="$local_gzip_path/cmii"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/uavms-2.0/"
oss_prefix_url="$oss_prefix_url/cmii/"
ltu
shift # past argument
;;
@@ -163,6 +163,6 @@ test(){
}
# test
#Download_Load_Tag_Upload "cmii"
Download_Load_Tag_Upload "rke"
Load_Tag_Upload "cmii"
# Load_Tag_Upload "cmii"

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env bash
name_space=szgz
name_space=zjyd
delete_all_fronted_cmii_pod(){

View File

@@ -20,7 +20,7 @@
# ## # Automatically grow the XFS filesystem to the maximum available size
# xfs_growfs /dev/mapper/centos-root
# xfs_growfs /dev/mapper/centos-root
# df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}'
@@ -72,9 +72,9 @@ echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"
# Expand the root volume; find the filesystem to grow (${VG_NAME}-root) via df -Th
# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
# xfs_growfs /dev/mapper/${VG_NAME}-root
# Custom: install lvm2

View File

@@ -0,0 +1,84 @@
#!/bin/bash
set -e
# User configuration
DISK="/dev/sdb" # physical disk to operate on (change to match your environment)
MOUNT_PATH="/var/lib/docker" # mount point path (the directory is created automatically)
FS_TYPE="ext4" # filesystem type; ext4 and xfs are supported (default: ext4)
#----------------------------------------------------------
# Core logic (avoid changing unless necessary)
#----------------------------------------------------------
function check_prerequisites() {
# must be run as root
[[ $EUID -ne 0 ]] && echo -e "\033[31mError: this script must be run as root\033[0m" && exit 1
# the disk must exist
[[ ! -b "$DISK" ]] && echo -e "\033[31mError: disk $DISK does not exist\033[0m" && exit 1
# validate the filesystem type
if [[ "$FS_TYPE" != "ext4" && "$FS_TYPE" != "xfs" ]]; then
echo -e "\033[31mError: unsupported filesystem type $FS_TYPE, only ext4/xfs are supported\033[0m"
exit 1
fi
}
function prepare_disk() {
local partition="${DISK}1"
echo -e "\033[34m正在初始化磁盘分区...\033[0m"
parted "$DISK" --script mklabel gpt
parted "$DISK" --script mkpart primary 0% 100%
parted "$DISK" --script set 1 lvm on
partprobe "$DISK" # 确保系统识别新分区表
echo -e "\033[34m正在创建LVM结构...\033[0m"
pvcreate "$partition"
vgcreate datavg "$partition"
lvcreate -y -l 100%FREE -n lvdata datavg
}
function format_and_mount() {
echo -e "\033[34m格式化逻辑卷...\033[0m"
if [[ "$FS_TYPE" == "ext4" ]]; then
mkfs.ext4 -F "/dev/datavg/lvdata"
else
mkfs.xfs -f "/dev/datavg/lvdata"
fi
echo -e "\033[34m设置挂载配置...\033[0m"
mkdir -p "$MOUNT_PATH"
UUID=$(blkid -s UUID -o value "/dev/datavg/lvdata")
echo "UUID=$UUID $MOUNT_PATH $FS_TYPE defaults 0 0" | tee -a /etc/fstab >/dev/null
mount -a
}
function verify_result() {
echo -e "\n\033[1;36m最终验证结果\033[0m"
lsblk -f "$DISK"
echo -e "\n磁盘空间使用情况"
df -hT "$MOUNT_PATH"
}
# Main execution flow
check_prerequisites
prepare_disk
format_and_mount
verify_result
echo -e "\n\033[32m操作执行完毕请仔细核查上述输出信息\033[0m"
# Original requirements: write a shell script with variables at the top for the physical disk name, the mount point path and the filesystem format, implementing the following:
# 1. Relabel the physical disk with a GPT partition table.
# 2. Create a single partition covering the whole disk, with the LVM partition type.
# 3. Assign the partition to the volume group datavg.
# 4. Allocate all available space in datavg to the logical volume lvdata.
# 5. Format the logical volume with the filesystem given by the variable (xfs and ext4 supported, default ext4).
# 6. Create the mount point directory given by the variable.
# 7. Add the entry to /etc/fstab, mount the logical volume at the mount point, and run the mount.
# 8. Run lsblk and df -TH to verify that the partition is mounted correctly.
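# Usage (a minimal sketch; the filename disk-init.sh is an assumption, not part of the repo):
#   edit DISK, MOUNT_PATH and FS_TYPE at the top, then run:
#     sudo bash disk-init.sh
#   the script finishes with lsblk -f and df -hT on the mount point as its own verification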