zeaslity
2024-10-30 16:30:51 +08:00
commit 437acbeb63
3363 changed files with 653948 additions and 0 deletions


@@ -0,0 +1,29 @@
#!/usr/bin/env bash
# Run this on every node
hostnamectl set-hostname master-node
sed -i "/search/ a nameserver 223.5.5.5" /etc/resolv.conf
echo "AllowTcpForwarding yes" >> /etc/ssh/sshd_config
systemctl restart sshd
cat >> /etc/hosts << EOF
20.47.129.116 master-node
20.47.129.117 worker-1
20.47.129.118 worker-2
20.47.129.119 worker-3
20.47.129.120 storage-1
EOF
yum clean all && yum makecache
# Master node login details (user / IP / password):
# root
# 20.47.129.116
# LYGapp_2023!
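# Hedged companion sketch for the remaining nodes (hostnames and IPs assumed from the hosts entries above);
# run the matching hostnamectl on each node, the rest of this file applies unchanged:
# hostnamectl set-hostname worker-1   # on 20.47.129.117; worker-2, worker-3 and storage-1 analogous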


@@ -0,0 +1,95 @@
#!/bin/bash
# Turn off swap
swapoff -a
cp -f /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak | grep -v swap >/etc/fstab
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeBefore=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "Root volume size before resizing: ${RootVolumeSizeBefore}"
# echo "y
# " | lvremove /dev/mapper/centos-swap
# freepesize=$(vgdisplay centos | grep 'Free PE' | awk '{print $5}')
# lvextend -l+${freepesize} /dev/mapper/centos-root
# ## Automatically grow the XFS filesystem to the maximum available size
# xfs_growfs /dev/mapper/centos-root
# df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}'
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeAfter=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "Root volume size after resizing: ${RootVolumeSizeAfter}"
# RootVolumeSizeBeforeNum=$(echo $RootVolumeSizeBefore | cut -d "G" -f1)
# RootVolumeSizeAfterNum=$(echo $RootVolumeSizeAfter | cut -d "G" -f1)
# echo "Congratulations, your root volume grew by +++++++$(( ${RootVolumeSizeAfterNum}-${RootVolumeSizeBeforeNum} ))GB+++++"
yum install lvm2 -y
echo ""
echo ""
echo ""
echo "-----------------------------------------------------------------------"
export VG_NAME=datavg
echo "n
p
t
8e
w
" | fdisk /dev/sda
partprobe
# If a volume group already exists, extend it instead of creating a new one:
# vgextend /dev/mapper/centos /dev/vda3
# NOTE: the disk partitioned above (/dev/sda) and the partition used here (/dev/vdb1) must refer to the same data disk; adjust to the actual device names.
vgcreate ${VG_NAME} /dev/vdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# Adjust the size to the actual environment
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
mkdir -p /var/lib/docker
#selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
export selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"
# To grow the root filesystem instead: find the filesystem to extend with df -Th (${VG_NAME}-root)
# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
# xfs_growfs /dev/mapper/${VG_NAME}-root
# Custom: install lvm2
echo "n
p
t
8e
w
" | fdisk /dev/vda
partprobe
vgextend klas_host-10-190-202-141 /dev/vda4
lvextend -l +100%FREE /dev/mapper/klas_host--10--190--202--141-root
partprobe
xfs_growfs /dev/mapper/klas_host--10--190--202--141-root
df -TH
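# A quick post-run check (hedged; assumes the datavg/lvdata names and the /var/lib/docker mount used above):
# lsblk                      # the new partition should appear under the data disk
# vgs && lvs                 # datavg and lvdata should be listed
# df -Th /var/lib/docker     # should show xfs on /dev/mapper/datavg-lvdata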

File diff suppressed because it is too large


@@ -0,0 +1,147 @@
#!/usr/bin/env bash
### The following values must be edited ###
#### The following values must be edited ###
#### The following values must be edited ###
cmlc_app_image_list="cmlc-app-images-4.1.6.txt" # change to match the release version
rancher_image_list="kubernetes-images-2.5.7-1.20.4.txt" # usually does not need to change
middleware_image_list="middleware-images.txt" # usually does not need to change
#DockerRegisterDomain="20.47.129.116:8033" # adjust to the actual environment
DockerRegisterDomain="172.10.125.92:8033" # adjust to the actual environment
HarborAdminPass=V2ryStr@ngPss # must match the password used in the first script
#### The values above must be edited ###
#### The values above must be edited ###
#### The values above must be edited ###
downloadAllNeededImages() {
  while [[ $# -gt 0 ]]; do
    pulled=""
    while IFS= read -r i; do
      [ -z "${i}" ] && continue
      echo "Pulling image: ${i}"
      if docker pull "${i}" >/dev/null 2>&1; then
        echo "Image pull success: ${i}"
        pulled="${pulled} ${i}"
      else
        if docker inspect "${i}" >/dev/null 2>&1; then
          pulled="${pulled} ${i}"
        else
          echo "Image pull failed: ${i}"
        fi
      fi
      echo "-------------------------------------------------"
    done <"${1}"
    shift
  done
}
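# Usage sketch: the function takes one or more image-list files as arguments, e.g.
# downloadAllNeededImages "${rancher_image_list}" "${middleware_image_list}"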
downloadAllNeededImagesAndCompress() {
  while [[ $# -gt 0 ]]; do
    pulled=""
    while IFS= read -r i; do
      [ -z "${i}" ] && continue
      echo "Pulling image: ${i}"
      if docker pull "${i}" >/dev/null 2>&1; then
        echo "Image pull success: ${i}"
        pulled="${pulled} ${i}"
      else
        if docker inspect "${i}" >/dev/null 2>&1; then
          pulled="${pulled} ${i}"
        else
          echo "Image pull failed: ${i}"
        fi
      fi
      echo "-------------------------------------------------"
    done <"${1}"
    # Strip only the trailing extension so version numbers in the list name are kept
    compressPacName="${1%.*}.tar.gz"
    echo "Creating ${compressPacName} with $(echo ${pulled} | wc -w | tr -d '[:space:]') images"
    docker save $(echo ${pulled}) | gzip --stdout > "${compressPacName}"
    shift
  done
  echo "Packaging complete!"
}
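# To import a generated archive on an offline host (docker load reads gzip directly; the filename
# below is the one produced from the rancher list above):
# docker load -i kubernetes-images-2.5.7-1.20.4.tar.gz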
pushRKEImageToHarbor(){
  linux_images=()
  while IFS= read -r i; do
    [ -z "${i}" ] && continue
    linux_images+=("${i}")
  done < "${rancher_image_list}"
  docker login -u admin -p "${HarborAdminPass}" "${DockerRegisterDomain}"
  for i in "${linux_images[@]}"; do
    [ -z "${i}" ] && continue
    case $i in
      */*)
        image_name="${DockerRegisterDomain}/${i}"
        ;;
      *)
        image_name="${DockerRegisterDomain}/rancher/${i}"
        ;;
    esac
    echo "Pushing image to the private registry: ${image_name}"
    docker tag "${i}" "${image_name}"
    docker push "${image_name}"
    echo "-------------------------------------------------"
  done
}
pushCMLCAPPImageToHarbor(){
  app_images=()
  while IFS= read -r i; do
    [ -z "${i}" ] && continue
    app_images+=("${i}")
  done < "${cmlc_app_image_list}"
  docker login -u admin -p "${HarborAdminPass}" "${DockerRegisterDomain}"
  for app in "${app_images[@]}"; do
    [ -z "${app}" ] && continue
    image_name="${DockerRegisterDomain}/$(echo ${app} | cut -d"/" -f2-8)"
    echo "Pushing image to the private registry: ${image_name}"
    docker tag "${app}" "${image_name}"
    docker push "${image_name}"
    echo "-------------------------------------------------"
  done
}
pushMiddlewareImageToHarbor(){
  middleware_image=()
  while IFS= read -r i; do
    [ -z "${i}" ] && continue
    middleware_image+=("${i}")
  done < "${middleware_image_list}"
  docker login -u admin -p "${HarborAdminPass}" "${DockerRegisterDomain}"
  for app in "${middleware_image[@]}"; do
    [ -z "${app}" ] && continue
    case ${app} in
      */*/*)
        image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f3-8)"
        ;;
      */*)
        image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f2-8)"
        ;;
    esac
    echo "Pushing image to the private registry: ${image_name}"
    docker tag "${app}" "${image_name}"
    docker push "${image_name}"
    echo "-------------------------------------------------"
  done
}
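# Harbor only accepts pushes into projects that already exist. A hedged sketch for pre-creating
# the two projects implied by the tag prefixes above (rancher, cmii) via the Harbor v2 API:
# curl -ks -u "admin:${HarborAdminPass}" -H "Content-Type: application/json" \
#      -X POST "http://${DockerRegisterDomain}/api/v2.0/projects" -d '{"project_name": "rancher", "public": true}'
# curl -ks -u "admin:${HarborAdminPass}" -H "Content-Type: application/json" \
#      -X POST "http://${DockerRegisterDomain}/api/v2.0/projects" -d '{"project_name": "cmii", "public": true}'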
#downloadAllNeededImagesAndCompress "${middleware_image_list}"
downloadAllNeededImages "${rancher_image_list}"
#pushRKEImageToHarbor
#pushCMLCAPPImageToHarbor
#pushMiddlewareImageToHarbor


@@ -0,0 +1,238 @@
#!/usr/bin/env bash
### The following values must be edited ###
### The following values must be edited ###
### The following values must be edited ###
# In theory, the server that can reach the public network is the one used to host Harbor.
# If all hosts can reach the public network, list every host here except the Harbor server.
PrivateServerIPs=(10.215.125.15 10.215.125.16 10.215.125.17) # internal server IPs, excluding the server with public network access
### The values above must be edited ###
### The values above must be edited ###
### The values above must be edited ###
RED="31m"    ## red
GREEN="32m"  ## green
YELLOW="33m" ## yellow
PURPLE="35m" ## purple
BLUE="36m"   ## blue
colorEcho() {
# shellcheck disable=SC2145
echo -e "\033[${1}${@:2}\033[0m" 1>&2
}
check_root() {
  if [[ $EUID != 0 ]]; then
    colorEcho ${RED} "The current account is not root (or lacks root privileges); cannot continue. Please switch to root!"
    colorEcho ${YELLOW} "You can use 'sudo -i' to obtain temporary root privileges (you may be prompted for the root password)."
    exit 1
  fi
}
startFunc(){
  colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
  colorEcho ${BLUE} "Starting the RKE cluster bring-up ..."
  echo ""
  colorEcho ${BLUE} "This script has a series of prerequisites; make sure every item below is done!!!!"
  colorEcho ${YELLOW} "----------------------------------------------------------"
  colorEcho ${RED} "1. Base environment initialization is complete and all RKE system images have been pushed to the private Harbor!"
  colorEcho ${RED} "2. The RKE cluster template file has been prepared and named cluster.yml !!"
  colorEcho ${RED} "3. ……"
  colorEcho ${YELLOW} "----------------------------------------------------------"
  while true; do
    colorEcho ${RED} "Please make sure all of the items above are done!!"
    read -r -p "Type yes to confirm so the script can continue: " input
    case $input in
    yes)
      colorEcho ${GREEN} "You have confirmed that all of the items above are done!!"
      colorEcho ${GREEN} "----------------------------------------------------------"
      echo ""
      colorEcho ${BLUE} "Starting the RKE cluster bring-up procedure."
      echo ""
      main
      break
      ;;
    *)
      echo ""
      colorEcho ${RED} "Invalid input!!! Please type >> yes << to confirm."
      colorEcho ${RED} "-----------------------------------------------------"
      echo ""
      ;;
    esac
  done
}
installRKE(){
  colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
  colorEcho ${BLUE} "Downloading and installing the RKE tool ..."
  echo ""
  colorEcho ${BLUE} "Downloading the rke binary from the Rancher mirror ..."
  wget http://rancher-mirror.cnrancher.com/rke/v1.2.6/rke_linux-amd64
  if [ -s rke_linux-amd64 ]; then
    colorEcho ${GREEN} "The rke tool has been downloaded."
    chmod +x rke_linux-amd64
    mv ./rke_linux-amd64 /usr/local/bin/rke
    colorEcho ${GREEN} "----------------------------------------------------------"
    rke --version
    colorEcho ${GREEN} "----------------------------------------------------------"
    rke config --list-version --all
    echo ""
    colorEcho ${BLUE} "Downloading the kubectl binary from the Rancher mirror ..."
    wget http://rancher-mirror.cnrancher.com/kubectl/v1.20.4/linux-amd64-v1.20.4-kubectl
    chmod +x linux-amd64-v1.20.4-kubectl
    mv linux-amd64-v1.20.4-kubectl /usr/local/bin/kubectl
    colorEcho ${GREEN} "----------------------------------------------------------"
    kubectl version
    colorEcho ${GREEN} "----------------------------------------------------------"
  else
    colorEcho ${RED} "Failed to download the rke tool; the script cannot continue. Please download rke manually."
    colorEcho ${RED} "Failed to download the rke tool; the script cannot continue. Please download rke manually."
    colorEcho ${RED} "Failed to download the rke tool; the script cannot continue. Please download rke manually."
    return 1
  fi
}
createRKEInstallerUser(){
  colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
  colorEcho ${BLUE} "Creating the rke-installer user ..."
  echo ""
  useradd rke-installer
  echo "rke-installer
rke-installer
" | passwd rke-installer
  # Add the rke-installer user to the docker group
  gpasswd -a rke-installer docker
  # Refresh group membership
  newgrp docker
  echo ""
  if [ -d /home/rke-installer ]; then
    colorEcho ${GREEN} "The rke-installer user has been created!!"
    echo ""
  else
    mkdir -p /home/rke-installer
    chown rke-installer:rke-installer -R /home/rke-installer
    usermod -d /home/rke-installer rke-installer
    colorEcho ${YELLOW} "Detected that the rke-installer user already exists."
  fi
  if [[ -s cluster.yaml || -s cluster.yml ]]; then
    colorEcho ${BLUE} "Copying the cluster template into the rke-installer home directory ..."
    mv cluster.y* /home/rke-installer/cluster.yml
    if [ -s /home/rke-installer/cluster.yml ]; then
      colorEcho ${BLUE} "The cluster.yml file is in place."
      chown rke-installer:rke-installer /home/rke-installer/cluster.yml
    else
      colorEcho ${RED} "No RKE cluster template file was found in the current directory."
      colorEcho ${RED} "The program cannot continue and will exit!!"
      return 1
    fi
  else
    colorEcho ${RED} "No RKE cluster template file was found in the current directory."
    colorEcho ${RED} "The program cannot continue and will exit!!"
    echo ""
    colorEcho ${YELLOW} "--------------------------------------------------"
    colorEcho ${RED} "Please create the RKE cluster template file and name it cluster.yml"
    colorEcho ${RED} "Please create the RKE cluster template file and name it cluster.yml"
    colorEcho ${RED} "Please create the RKE cluster template file and name it cluster.yml"
    colorEcho ${YELLOW} "--------------------------------------------------"
    return 1
  fi
  colorEcho ${BLUE} "Switching to the rke-installer user ..."
  echo ""
  colorEcho ${BLUE} "Please check whether the rke-installer user can run 'docker ps'!!"
  # Run the check as rke-installer (a bare su inside a script does not change the user for the remaining lines)
  su - rke-installer -c "docker ps"
  colorEcho ${BLUE} "----------------------------------------------------------"
}
generateRKEUserKey(){
  colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
  colorEcho ${BLUE} "Generating the ssh key for the rke-installer user ..."
  echo ""
  su rke-installer
  ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
  cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
  chmod 600 ~/.ssh/authorized_keys
  colorEcho ${GREEN} "--------------------------------------------------------------"
  colorEcho ${GREEN} "-----------Local host configuration complete!-------------"
  echo ""
  for ip in "${PrivateServerIPs[@]}"; do
    colorEcho ${BLUE} "Please run the following commands manually, as root, on host ${ip}"
    colorEcho ${BLUE} "Please run the following commands manually, as root, on host ${ip}"
    colorEcho ${BLUE} "Please run the following commands manually, as root, on host ${ip}"
    colorEcho ${BLUE} "-----------------------------------------------"
    echo ""
    echo ""
    colorEcho ${RED} " Run these as the root user!!! "
    colorEcho ${RED} " Run these as the root user!!! "
    colorEcho ${RED} " Run these as the root user!!! "
    echo ""
    colorEcho ${YELLOW} "useradd rke-installer && echo \"rke-installer
rke-installer
\" | passwd rke-installer && gpasswd -a rke-installer docker && newgrp docker && su rke-installer && docker ps "
    echo ""
    colorEcho ${YELLOW} "clear && ssh-keygen -t rsa -P \"\" -f ~/.ssh/id_rsa && echo \"$(cat ~/.ssh/id_rsa.pub)\" >> ~/.ssh/authorized_keys && echo \"\" && cat ~/.ssh/authorized_keys"
    echo ""
    echo ""
    while true; do
      colorEcho ${RED} "Please make sure you have run the commands above on host ${ip}!!"
      read -r -p "Type yes to confirm so the script can continue: " input
      case $input in
      yes)
        colorEcho ${GREEN} "You have confirmed that the ssh key has been added on host ${ip}."
        echo ""
        break
        ;;
      *)
        echo ""
        colorEcho ${RED} "Invalid input!!! Please type >> yes << to confirm."
        colorEcho ${RED} "Please run the commands above on host ${ip}!!!"
        colorEcho ${RED} "Otherwise this script will not work!!"
        colorEcho ${RED} "-----------------------------------------------------"
        echo ""
        ;;
      esac
    done
  done
}
startRKECLuster(){
  colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
  colorEcho ${BLUE} "Starting the RKE cluster "
  colorEcho ${BLUE} "Starting the RKE cluster "
  colorEcho ${BLUE} "Starting the RKE cluster "
  echo ""
  if [[ $(pwd) == "/home/rke-installer" ]]; then
    colorEcho ${BLUE} "The current directory is /home/rke-installer"
    echo ""
    colorEcho ${BLUE} "Starting the RKE cluster bring-up "
    colorEcho ${BLUE} "-------------------------------------------------------------"
    for i in {3..1}; do
      colorEcho ${BLUE} "Countdown ->> $i <<- about to start RKE; the log output above will be cleared."
      sleep 2
    done
    clear
    rke up
  else
    colorEcho ${BLUE} "The current directory is not /home/rke-installer; changing directory."
    cd /home/rke-installer
    startRKECLuster
  fi
}
main(){
check_root
installRKE || return $?
generateRKEUserKey || return $?
startRKECLuster || return $?
}
startFunc
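# A minimal post-run check (rke up writes its kubeconfig as kube_config_cluster.yml next to cluster.yml):
# export KUBECONFIG=/home/rke-installer/kube_config_cluster.yml
# kubectl get nodes -o wide
# kubectl -n kube-system get pods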


@@ -0,0 +1,62 @@
harbor.cdcyy.com.cn/cmii/cmii-admin-data:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-admin-user:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-open-gateway:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-brain:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:3.3.3
harbor.cdcyy.com.cn/cmii/cmii-uav-cms:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-developer:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-device:4.1.6-0913
harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-integration:4.1.6-0913
harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-logger:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-mission:4.1.6-0913
harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:4.1.6-0913
harbor.cdcyy.com.cn/cmii/cmii-uav-notice:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-process:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:4.1.6-0913
harbor.cdcyy.com.cn/cmii/cmii-uav-user:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-live-operator:v4.0.6
harbor.cdcyy.com.cn/cmii/cmii-rtsp-operator:v4.1.0
harbor.cdcyy.com.cn/cmii/zlm-mediaserver:v1.0.6
harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:v4.0.0
harbor.cdcyy.com.cn/cmii/cmii-srs-operator:v4.0.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:4.1.0


@@ -0,0 +1,61 @@
harbor.cdcyy.com.cn/cmii/cmii-admin-data:4.1.6-test
harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-admin-user:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-app-release:4.1.6-20006
minio/minio:RELEASE.2022-10-24T18-35-07Z
harbor.cdcyy.com.cn/cmii/cmii-open-gateway:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:4.1.6-cm-0828
harbor.cdcyy.com.cn/cmii/cmii-uav-brain:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-cms:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-developer:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-device:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:4.1.6-beta
harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:4.1.6-24241
harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-logger:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-mission:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-notice:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform:4.1.6-24241
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-process:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-user:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-srs-operator:v4.0.0
harbor.cdcyy.com.cn/cmii/cmii-live-operator:v4.0.6
harbor.cdcyy.com.cn/cmii/cmii-rtsp-operator:v4.1.0
harbor.cdcyy.com.cn/cmii/zlm-mediaserver:v1.0.6
harbor.cdcyy.com.cn/cmii/nfs-subdir-external-provisioner:v4.0.2


@@ -0,0 +1,69 @@
harbor.cdcyy.com.cn/cmii/cmii-admin-data:5.1.0-staging
harbor.cdcyy.com.cn/cmii/cmii-admin-data:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-admin-user:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-app-release:4.2.0-validation
harbor.cdcyy.com.cn/cmii/cmii-open-gateway:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:4.1.6-cm-0828
harbor.cdcyy.com.cn/cmii/cmii-uav-brain:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:3.3.3
harbor.cdcyy.com.cn/cmii/cmii-uav-cms:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:4.1.6-beta
harbor.cdcyy.com.cn/cmii/cmii-uav-developer:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-device:5.1.0-staging
harbor.cdcyy.com.cn/cmii/cmii-uav-device:5.1.0-24469-1012
harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:5.1.0-staging
harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-integration:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-logger:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-mission:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-mission:5.1.0-pr107
harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-notice:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.1.0-24469-1012
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:5.0.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform:4.2.0-hljtt
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:5.1.0-24092
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:5.1.0-24092
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-process:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:5.1.0-24469-1012
harbor.cdcyy.com.cn/cmii/cmii-uav-tower:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-user:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:5.1.0
harbor.cdcyy.com.cn/cmii/cmii-live-operator:v4.0.6
harbor.cdcyy.com.cn/cmii/cmii-rtsp-operator:v4.1.0
harbor.cdcyy.com.cn/cmii/zlm-mediaserver:v1.0.6
harbor.cdcyy.com.cn/cmii/nfs-subdir-external-provisioner:v4.0.2


@@ -0,0 +1,44 @@
hostname: 20.47.129.116
http:
  port: 8033
harbor_admin_password: V2ryStr@ngPss
database:
  password: V2ryStr@ngPss
  max_idle_conns: 50
  max_open_conns: 1000
data_volume: /var/lib/docker/harbor-data
clair:
  updaters_interval: 12
jobservice:
  max_job_workers: 10
notification:
  webhook_job_max_retry: 10
chart:
  absolute_url: disabled
log:
  level: error
  local:
    rotate_count: 50
    rotate_size: 200M
    location: /var/log/harbor
_version: 2.0.0
proxy:
  http_proxy:
  https_proxy:
  no_proxy:
  components:
    - core
    - jobservice
    - clair
    - trivy
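# A minimal bring-up sketch, assuming the Harbor offline installer bundle has already been unpacked
# on this host and this file has been copied into it as harbor.yml (the bundle path is an assumption):
# cd harbor && ./install.sh
# docker login 20.47.129.116:8033 -u admin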


@@ -0,0 +1,64 @@
busybox
rancher/backup-restore-operator:v1.0.3
rancher/calico-cni:v3.13.4
rancher/calico-ctl:v3.13.4
rancher/calico-kube-controllers:v3.13.4
rancher/calico-node:v3.13.4
rancher/calico-pod2daemon-flexvol:v3.13.4
rancher/cis-operator:v1.0.3
rancher/cluster-proportional-autoscaler:1.7.1
rancher/configmap-reload:v0.3.0-rancher4
rancher/coredns-coredns:1.6.9
rancher/coreos-etcd:v3.4.3-rancher1
rancher/coreos-flannel:v0.12.0
rancher/coreos-flannel:v0.13.0-rancher1
rancher/coreos-kube-state-metrics:v1.9.7
rancher/coreos-prometheus-config-reloader:v0.39.0
rancher/coreos-prometheus-operator:v0.39.0
rancher/externalip-webhook:v0.1.6
rancher/flannel-cni:v0.3.0-rancher6
rancher/fleet-agent:v0.3.4
rancher/fleet:v0.3.4
rancher/fluentd:v0.1.24
rancher/grafana-grafana:7.1.5
rancher/hyperkube:v1.18.16-rancher1
rancher/jimmidyson-configmap-reload:v0.3.0
rancher/k8s-dns-dnsmasq-nanny:1.15.2
rancher/k8s-dns-kube-dns:1.15.2
rancher/k8s-dns-node-cache:1.15.7
rancher/k8s-dns-sidecar:1.15.2
rancher/klipper-lb:v0.1.2
rancher/kube-api-auth:v0.1.4
rancher/kubectl:v1.18.6
rancher/kubernetes-external-dns:v0.7.3
rancher/library-busybox:1.31.1
rancher/library-busybox:1.32.1
rancher/library-nginx:1.19.2-alpine
rancher/library-traefik:1.7.19
rancher/local-path-provisioner:v0.0.11
rancher/local-path-provisioner:v0.0.14
rancher/local-path-provisioner:v0.0.19
rancher/log-aggregator:v0.1.7
rancher/istio-kubectl:1.5.10
rancher/metrics-server:v0.3.6
rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
rancher/nginx-ingress-controller:nginx-0.35.0-rancher2
rancher/opa-gatekeeper:v3.1.0-beta.7
rancher/openzipkin-zipkin:2.14.2
rancher/pause:3.1
rancher/plugins-docker:18.09
rancher/prom-alertmanager:v0.21.0
rancher/prom-node-exporter:v1.0.1
rancher/prom-prometheus:v2.12.0
rancher/prom-prometheus:v2.18.2
rancher/prometheus-auth:v0.2.1
rancher/rancher-agent:v2.5.7
rancher/rancher-webhook:v0.1.0-beta9
rancher/rancher:v2.5.7
rancher/rke-tools:v0.1.72
rancher/security-scan:v0.1.14
rancher/security-scan:v0.2.2
rancher/shell:v0.1.6
rancher/sonobuoy-sonobuoy:v0.16.3
rancher/system-upgrade-controller:v0.6.2


@@ -0,0 +1,63 @@
busybox
rancher/backup-restore-operator:v1.0.3
rancher/calico-cni:v3.17.2
rancher/calico-ctl:v3.17.2
rancher/calico-kube-controllers:v3.17.2
rancher/calico-node:v3.17.2
rancher/calico-pod2daemon-flexvol:v3.17.2
rancher/cis-operator:v1.0.3
rancher/cluster-proportional-autoscaler:1.7.1
rancher/coredns-coredns:1.8.0
rancher/coreos-etcd:v3.4.14-rancher1
rancher/coreos-kube-state-metrics:v1.9.7
rancher/coreos-prometheus-config-reloader:v0.39.0
rancher/coreos-prometheus-operator:v0.39.0
rancher/externalip-webhook:v0.1.6
rancher/flannel-cni:v0.3.0-rancher6
rancher/coreos-flannel:v0.13.0-rancher1
rancher/fleet-agent:v0.3.4
rancher/fleet:v0.3.4
rancher/fluentd:v0.1.24
rancher/grafana-grafana:7.1.5
rancher/hyperkube:v1.20.4-rancher1
rancher/jimmidyson-configmap-reload:v0.3.0
rancher/k8s-dns-dnsmasq-nanny:1.15.2
rancher/k8s-dns-kube-dns:1.15.2
rancher/k8s-dns-node-cache:1.15.13
rancher/k8s-dns-sidecar:1.15.2
rancher/klipper-lb:v0.1.2
rancher/kube-api-auth:v0.1.4
rancher/kubectl:v1.20.4
rancher/kubernetes-external-dns:v0.7.3
rancher/cluster-proportional-autoscaler:1.8.1
rancher/library-busybox:1.31.1
rancher/library-busybox:1.32.1
rancher/library-nginx:1.19.2-alpine
rancher/library-traefik:1.7.19
rancher/local-path-provisioner:v0.0.11
rancher/local-path-provisioner:v0.0.14
rancher/local-path-provisioner:v0.0.19
rancher/log-aggregator:v0.1.7
rancher/istio-kubectl:1.5.10
rancher/metrics-server:v0.4.1
rancher/configmap-reload:v0.3.0-rancher4
rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
rancher/nginx-ingress-controller:nginx-0.43.0-rancher1
rancher/opa-gatekeeper:v3.1.0-beta.7
rancher/openzipkin-zipkin:2.14.2
rancher/pause:3.2
rancher/plugins-docker:18.09
rancher/prom-alertmanager:v0.21.0
rancher/prom-node-exporter:v1.0.1
rancher/prom-prometheus:v2.18.2
rancher/prometheus-auth:v0.2.1
rancher/rancher-agent:v2.5.7
rancher/rancher-webhook:v0.1.0-beta9
rancher/rancher:v2.5.7
rancher/rke-tools:v0.1.72
rancher/security-scan:v0.1.14
rancher/security-scan:v0.2.2
rancher/shell:v0.1.6
rancher/sonobuoy-sonobuoy:v0.16.3
rancher/system-upgrade-controller:v0.6.2


@@ -0,0 +1,24 @@
docker.io/bitnami/redis:6.2.6-debian-10-r0
docker.io/bitnami/redis:6.2.14-debian-11-r1
docker.io/bitnami/mysql:8.0.35-debian-11-r1
docker.io/bitnami/mysql:8.1.0-debian-11-r42
docker.io/simonrupf/chronyd:0.4.3
docker.io/bitnami/bitnami-shell:10-debian-10-r140
docker.io/bitnami/bitnami-shell:11-debian-11-r136
docker.io/bitnami/rabbitmq:3.9.12-debian-10-r3
docker.io/bitnami/rabbitmq:3.11.26-debian-11-r2
docker.io/ossrs/srs:v4.0.136
docker.io/emqx/emqx:4.2.12
docker.io/nacos/nacos-server:v2.1.2
docker.io/nacos/nacos-server:v2.1.2-slim
docker.io/mongo:5.0
docker.io/rabbitmq:3.9-management
docker.io/bitnami/minio:2022.5.4
docker.io/bitnami/minio:2023.5.4
docker.io/kubernetesui/dashboard:v2.0.1
docker.io/kubernetesui/metrics-scraper:v1.0.4
docker.io/ossrs/srs:v4.0-r3
docker.io/nginx:1.21.3
docker.io/redis:6.0.20-alpine
docker.io/dyrnq/nfs-subdir-external-provisioner:v4.0.2


@@ -0,0 +1,217 @@
nodes:
  - address: 20.47.129.116
    user: root
    role:
      - controlplane
      - etcd
      - worker
    internal_address: 20.47.129.116
    labels:
      ingress-deploy: true
  - address: 20.47.129.117
    user: root
    role:
      - worker
    internal_address: 20.47.129.117
  - address: 20.47.129.118
    user: root
    role:
      - worker
    internal_address: 20.47.129.118
    labels:
      mysql-deploy: true
  - address: 20.47.129.119
    user: root
    role:
      - worker
    internal_address: 20.47.129.119
    labels:
      mysql-deploy: true
authentication:
  strategy: x509
  sans:
    - "20.47.129.116"
private_registries:
  - url: 20.47.129.116:8033 # private registry address
    user: admin
    password: "V2ryStr@ngPss"
    is_default: true
##############################################################################
# Defaults to false; if set to true, RKE will not raise an error when an unsupported Docker version is detected
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
#ssh_key_path: /root/.ssh/id_ed25519
ssh_key_path: /root/.ssh/id_rsa
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
  etcd:
    backup_config:
      enabled: false
      interval_hours: 72
      retention: 3
      safe_timestamp: false
      timeout: 300
    creation: 12h
    extra_args:
      election-timeout: 5000
      heartbeat-interval: 500
    gid: 0
    retention: 72h
    snapshot: false
    uid: 0
  kube-api:
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-controller
    service_cluster_ip_range: 172.24.0.0/16
    # Expose a different port range for NodePort services
    service_node_port_range: 30000-40000
    always_pull_images: true
    pod_security_policy: false
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Enable audit log to stdout
      audit-log-path: "-"
      # Increase number of delete workers
      delete-collection-workers: 3
      # Set the level of log output to warning-level
      v: 1
  kube-controller:
    # CIDR pool used to assign IP addresses to pods in the cluster
    cluster_cidr: 172.28.0.0/16
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-api
    service_cluster_ip_range: 172.24.0.0/16
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Set the level of log output to debug-level
      v: 1
      # Enable RotateKubeletServerCertificate feature gate
      feature-gates: RotateKubeletServerCertificate=true
      # Enable TLS Certificates management
      # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
      cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
      cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
  kubelet:
    # Base domain for the cluster
    cluster_domain: cluster.local
    # IP address for the DNS service endpoint
    cluster_dns_server: 172.24.0.10
    # Fail if swap is on
    fail_swap_on: false
    # Optionally define additional volume binds to a service
    extra_binds:
      - "/data/minio-pv:/hostStorage" # do not modify; added for the MinIO PV
    extra_args:
      # Set max pods to 122 instead of the default 110
      max-pods: 122
  scheduler:
    extra_args:
      # Set the level of log output to warning-level
      v: 0
  kubeproxy:
    extra_args:
      # Set the level of log output to warning-level
      v: 1
authorization:
  mode: rbac
addon_job_timeout: 30
# Specify network plugin-in (canal, calico, flannel, weave, or none)
network:
  options:
    flannel_backend_type: vxlan
    flannel_iface: ens160
    flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
    flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
  plugin: flannel
# Specify DNS provider (coredns or kube-dns)
dns:
  provider: coredns
  nodelocal: {}
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 20%
      maxSurge: 15%
  linear_autoscaler_params:
    cores_per_replica: 0.34
    nodes_per_replica: 4
    prevent_single_point_failure: true
    min: 2
    max: 3
# Specify monitoring provider (metrics-server)
monitoring:
  provider: metrics-server
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 8
ingress:
  provider: nginx
  default_backend: true
  http_port: 0
  https_port: 0
  extra_envs:
    - name: TZ
      value: Asia/Shanghai
  node_selector:
    ingress-deploy: true
  options:
    use-forwarded-headers: "true"
    access-log-path: /var/log/nginx/access.log
    client-body-timeout: '6000'
    compute-full-forwarded-for: 'true'
    enable-underscores-in-headers: 'true'
    log-format-escape-json: 'true'
    log-format-upstream: >-
      { "msec": "$msec", "connection": "$connection", "connection_requests":
      "$connection_requests", "pid": "$pid", "request_id": "$request_id",
      "request_length": "$request_length", "remote_addr": "$remote_addr",
      "remote_user": "$remote_user", "remote_port": "$remote_port",
      "http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
      "$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
      "request_uri": "$request_uri", "args": "$args", "status": "$status",
      "body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
      "http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
      "http_host": "$http_host", "server_name": "$server_name", "request_time":
      "$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
      "$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
      "upstream_response_time": "$upstream_response_time",
      "upstream_response_length": "$upstream_response_length",
      "upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
      "$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
      "request_method": "$request_method", "server_protocol": "$server_protocol",
      "pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
      "geoip_country_code": "$geoip_country_code" }
    proxy-body-size: 5120m
    proxy-read-timeout: '6000'
    proxy-send-timeout: '6000'
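# A quick check once the cluster is up (hedged; uses the kubeconfig generated by rke up):
# export KUBECONFIG=kube_config_cluster.yml
# kubectl get nodes --show-labels | grep -E 'ingress-deploy|mysql-deploy'   # labels defined above
# kubectl -n ingress-nginx get pods -o wide   # the ingress controller should land on the labeled node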


@@ -0,0 +1,77 @@
# Download all offline files
wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/kebite-4.1.6.tar.gz
wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/v4.1.6/middleware-images.tar.gz
wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/v4.1.6/rancher-1.20.4-image.tar.gz
wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/v4.1.6/rke
# Copy files to all hosts in bulk
ip_list=(10.20.1.133 10.20.1.134 10.20.1.132)
for ip in "${ip_list[@]}"; do
echo "yes
yes
" | scp /etc/docker/daemon.json root@${ip}:/etc/docker/daemon.json
ssh root@${ip} "systemctl restart docker"
done
ip_list=(10.20.1.133 10.20.1.134 10.20.1.132)
for ip in "${ip_list[@]}"; do
scp /etc/docker/daemon.json root@${ip}:/etc/docker/daemon.json
# scp /etc/ssh/sshd_config root@${ip}:/etc/ssh/sshd_config
ssh root@${ip} "systemctl restart docker"
# ssh root@${ip} "systemctl restart sshd"
done
vim /etc/docker/daemon.json
{
"insecure-registries" : ["20.47.129.116:8033"]
}
systemctl restart docker
list=(iptables)
for Packages in "${list[@]}"
do
apt-get download $(apt-cache depends --recurse --no-recommends --no-suggests --no-conflicts --no-breaks --no-replaces --no-enhances ${Packages} | grep "^\w" | sort -u)
done
sudo dpkg -i ./containerd.io_1.6.15-1_amd64.deb \
./docker-ce-cli_20.10.10~3-0~ubuntu-focal_amd64.deb \
./docker-ce_20.10.10~3-0~ubuntu-focal_amd64.deb \
./docker-ce-rootless-extras_20.10.10~3-0~ubuntu-focal_amd64.deb \
./docker-buildx-plugin_0.11.1-1~ubuntu.20.04~focal_amd64.deb \
./docker-compose-plugin_2.19.1-1~ubuntu.20.04~focal_amd64.deb
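# Sanity check after the offline install (assumes systemd manages docker on this host):
sudo systemctl enable --now docker && docker version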
rpcinfo -p localhost
# Generate an ed25519 ssh key
ssh-keygen -t ed25519 -f .ssh/id_ed25519 -C "m@github"
echo $(cat .ssh/id_ed25519.pub)
echo "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHXDzet+Z2/AmrzIZpPviz7Z9AMxLWiJUOwtj/3NPauk m@github" >>.ssh/authorized_keys
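# Equivalent single step when the target host is reachable over ssh (the target hostname is a placeholder):
# ssh-copy-id -i ~/.ssh/id_ed25519.pub root@<target-host>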
# Change the IP detected by calico-node
kubectl -n kube-system edit daemonset calico-node
env:
- name: FELIX_INTERFACEPREFIX
  value: "eth0"
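# If the intent is to control which node IP calico-node detects, the usual knob is
# IP_AUTODETECTION_METHOD rather than FELIX_INTERFACEPREFIX (hedged alternative, not taken from this repo):
#   - name: IP_AUTODETECTION_METHOD
#     value: "interface=eth0"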
./mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306
redis-cli -h 127.0.0.1 -a Mcache@4522
./mysql -uboge -pboge8tingH -h127.0.0.1 -P21306


@@ -0,0 +1,53 @@
Day 1
1. Kernel parameter tuning and optimization, disabling swap, SELinux, etc., and other server environment initialization
2. Installation of common tools (curl, wget, vim, ping, telnet, etc.) and configuration of passwordless SSH login
3. Install a time synchronization server and synchronize the clocks of all servers
Day 2
1. Download and install the Docker service offline; adjust and optimize the Docker configuration
2. Install docker-compose, install the Harbor server, and configure passwordless image pulls for the servers
3. Download and install GlusterFS, Heketi, and NFS offline; configure and install the underlying storage services
Day 3
1. Download the Kubernetes installation files offline
2. Upload the Kubernetes offline installation package to all server nodes
3. Adjust the corresponding Kubernetes installation and deployment configuration
4. Configure the apiserver, controller-manager, scheduler, and kube-proxy service files and install them; install the master and node components; verify the Kubernetes cluster installation
Day 4
1. Install the Calico network plugin
2. Verify the Kubernetes cluster: network, nodes, images, etc.
3. Download the offline images and install the Ingress component for the service exposure layer
Day 5
1. Download all images required by the middleware, then configure, install, and deploy the middleware
2. Install the MySQL database component, configure and verify it
3. Install MongoDB and complete its configuration and verification
4. Install Redis and complete its configuration and verification
5. Install EMQX (the UAV flight control communication component) and complete its configuration and verification
6. Install the RabbitMQ message queue component, configure and verify it
7. Install the Nacos microservice registry component, configure and verify it
Day 6
1. Configure the storage class, create storage volumes, and install the business-layer object storage service (MinIO cluster)
2. Install the video streaming service SRS-Cluster
3. Make sure the stream push/pull services run normally
Day 7
1. Download offline and upload all China Mobile Lingyun (CMLC) platform business images
2. Adapt the CMLC platform business configuration to the environment and upload it
3. Install the CMLC base initialization data and initialize the operations platform data
Day 8
1. Deploy all business component microservices and make sure every microservice runs normally
2. Bring the platform to an initial running state (not yet usable)
Day 9
1. Deploy the GDR transcoding server and complete its configuration
2. Make sure the GDR service runs normally
3. Begin preliminary joint debugging
Day 10
1. Preliminary verification of the CMLC platform installation and deployment functions
2. Joint testing of the platform business components in operation
3. Make sure the platform's core functions are available and working