zeaslity
2024-10-30 16:30:51 +08:00
commit 437acbeb63
3363 changed files with 653948 additions and 0 deletions


@@ -0,0 +1,25 @@
#!/usr/bin/env bash
# Run on every node (adjust the hostname argument per node)
hostnamectl set-hostname master-node
sed -i "/search/ a nameserver 223.5.5.5" /etc/resolv.conf
echo "AllowTcpForwarding yes" >> /etc/ssh/sshd_config
systemctl restart sshd
cat >> /etc/hosts << EOF
10.20.1.130 master-node
10.20.1.133 worker-1
10.20.1.134 worker-2
10.20.1.132 storage-1
EOF
yum clean all && yum makecache
36.138.132.240
Zyly2023**
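# A minimal sketch for rolling this init out to the remaining nodes (assumption: the script above is saved locally as node-init.sh and root SSH access works):
for entry in "worker-1 10.20.1.133" "worker-2 10.20.1.134" "storage-1 10.20.1.132"; do
  name=${entry%% *}
  ip=${entry##* }
  scp node-init.sh root@"${ip}":/tmp/node-init.sh
  # the script hard-codes master-node, so set the per-node hostname afterwards
  ssh root@"${ip}" "bash /tmp/node-init.sh && hostnamectl set-hostname ${name}"
done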


@@ -0,0 +1,77 @@
#! /bin/bash
# disable swap
swapoff -a
cp -f /etc/fstab /etc/fstab_bak
grep -v swap /etc/fstab_bak > /etc/fstab
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeBefore=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "root volume size before extension: ${RootVolumeSizeBefore}"
# echo "y
# " | lvremove /dev/mapper/centos-swap
# freepesize=$(vgdisplay centos | grep 'Free PE' | awk '{print $5}')
# lvextend -l+${freepesize} /dev/mapper/centos-root
# ## automatically grow the XFS filesystem to the maximum available size
# xfs_growfs /dev/mapper/centos-root
# df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}'
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeAfter=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "root volume size after extension: ${RootVolumeSizeAfter}"
# RootVolumeSizeBeforeNum=$(echo $RootVolumeSizeBefore | cut -d "G" -f1)
# RootVolumeSizeAfterNum=$(echo $RootVolumeSizeAfter | cut -d "G" -f1)
# echo "congratulations, your root volume grew by +++++++$(( ${RootVolumeSizeAfterNum}-${RootVolumeSizeBeforeNum} ))GB+++++"
yum install lvm2 -y
echo ""
echo ""
echo ""
echo "-----------------------------------------------------------------------"
export VG_NAME=datavg
echo "n
p
t
8e
w
" | fdisk /dev/vdb
partprobe
# if a volume group already exists, just extend it instead
# vgextend /dev/mapper/centos /dev/vda3
vgcreate ${VG_NAME} /dev/vdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# adjust the size to your environment
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
mkdir -p /var/lib/docker
#selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
export selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"
# to grow the root volume instead, find the filesystem to extend (${VG_NAME}-root) via df -Th
# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
# xfs_growfs /dev/mapper/${VG_NAME}-root
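# A quick sanity check (sketch) that the new data volume ended up where Docker expects it:
lsblk /dev/vdb                    # vdb1 should show up as an LVM physical volume
vgs datavg && lvs datavg          # the datavg volume group and the lvdata logical volume
df -Th /var/lib/docker            # should report xfs on /dev/mapper/datavg-lvdata
grep lvdata /etc/fstab            # confirm the mount survives a reboot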

(File diff suppressed because it is too large)


@@ -0,0 +1,146 @@
#!/usr/bin/env bash
### Modify the values below ###
### Modify the values below ###
### Modify the values below ###
cmlc_app_image_list="cmlc-app-images-4.1.6.txt" # update the version as needed
rancher_image_list="kubernetes-images-2.5.7-1.20.4.txt" # usually no change needed
middleware_image_list="middleware-images.txt" # usually no change needed
DockerRegisterDomain="10.20.1.130:8033" # adjust to your environment
HarborAdminPass=V2ryStr@ngPss # must match the password used in the first script
### Modify the values above ###
### Modify the values above ###
### Modify the values above ###
downloadAllNeededImages() {
while [[ $# -gt 0 ]]; do
pulled=""
while IFS= read -r i; do
[ -z "${i}" ] && continue
echo "开始下载:${i}"
if docker pull "${i}" >/dev/null 2>&1; then
echo "Image pull success: ${i}"
pulled="${pulled} ${i}"
else
if docker inspect "${i}" >/dev/null 2>&1; then
pulled="${pulled} ${i}"
else
echo "Image pull failed: ${i}"
fi
fi
echo "-------------------------------------------------"
done <"${1}"
shift
done
}
downloadAllNeededImagesAndCompress() {
while [[ $# -gt 0 ]]; do
pulled=""
while IFS= read -r i; do
[ -z "${i}" ] && continue
echo "开始下载:${i}"
if docker pull "${i}" >/dev/null 2>&1; then
echo "Image pull success: ${i}"
pulled="${pulled} ${i}"
else
if docker inspect "${i}" >/dev/null 2>&1; then
pulled="${pulled} ${i}"
else
echo "Image pull failed: ${i}"
fi
fi
echo "-------------------------------------------------"
done <"${1}"
compressPacName="${1%.*}.tar.gz"
echo "Creating ${compressPacName} with $(echo ${pulled} | wc -w | tr -d '[:space:]') images"
docker save $(echo ${pulled}) | gzip --stdout > ${compressPacName}
shift
done
echo "已经完成打包工作!"
}
pushRKEImageToHarbor(){
linux_images=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
linux_images+=("${i}");
done < "${rancher_image_list}"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
for i in "${linux_images[@]}"; do
[ -z "${i}" ] && continue
case $i in
*/*)
image_name="${DockerRegisterDomain}/${i}"
;;
*)
image_name="${DockerRegisterDomain}/rancher/${i}"
;;
esac
echo "开始镜像至私有仓库推送:${image_name}"
docker tag "${i}" "${image_name}"
docker push "${image_name}"
echo "-------------------------------------------------"
done
}
pushCMLCAPPImageToHarbor(){
app_images=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
app_images+=("${i}");
done < "${cmlc_app_image_list}"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
for app in "${app_images[@]}"; do
[ -z "${app}" ] && continue
image_name="${DockerRegisterDomain}/$(echo ${app} | cut -d"/" -f2-8)"
echo "开始镜像至私有仓库推送:${image_name}"
docker tag "${app}" "${image_name}"
docker push "${image_name}"
echo "-------------------------------------------------"
done
}
pushMiddlewareImageToHarbor(){
middleware_image=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
middleware_image+=("${i}");
done < "${middleware_image_list}"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
for app in "${middleware_image[@]}"; do
[ -z "${app}" ] && continue
case ${app} in
*/*/*)
image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f3-8)"
;;
*/*)
image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f2-8)"
;;
esac
echo "开始镜像至私有仓库推送:${image_name}"
docker tag "${app}" "${image_name}"
docker push "${image_name}"
echo "-------------------------------------------------"
done
}
#downloadAllNeededImagesAndCompress "${middleware_image_list}"
#downloadAllNeededImages "${middleware_image_list}"
pushRKEImageToHarbor
#pushCMLCAPPImageToHarbor
#pushMiddlewareImageToHarbor
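# Typical flow (a sketch; the file name image-sync.sh is an assumption):
# 1. On an Internet-connected host, enable downloadAllNeededImagesAndCompress "${rancher_image_list}" above and run the script.
# 2. Copy the generated kubernetes-images-2.5.7-1.20.4.tar.gz to the Harbor host and load it:
docker load -i kubernetes-images-2.5.7-1.20.4.tar.gz
# 3. Re-run the script with only pushRKEImageToHarbor enabled (as above) to push everything into Harbor.
bash image-sync.sh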


@@ -0,0 +1,238 @@
#!/usr/bin/env bash
### Modify the values below ###
### Modify the values below ###
### Modify the values below ###
# In principle, the server that can reach the public Internet is used to host the Harbor server
# If every host can reach the public Internet, list all hosts except the Harbor server
PrivateServerIPs=(10.215.125.15 10.215.125.16 10.215.125.17) # intranet server IPs, excluding servers that can reach the public Internet
### Modify the values above ###
### Modify the values above ###
### Modify the values above ###
RED="31m" ## 姨妈红
GREEN="32m" ## 水鸭青
YELLOW="33m" ## 鸭屎黄
PURPLE="35m" ## 基佬紫
BLUE="36m" ## 天依蓝
colorEcho() {
# shellcheck disable=SC2145
echo -e "\033[${1}${@:2}\033[0m" 1>&2
}
check_root() {
if [[ $EUID != 0 ]]; then
colorEcho ${RED} "当前非root账号(或没有root权限)无法继续操作请更换root账号!"
colorEcho ${YELLOW} "使用sudo -命令获取临时root权限执行后可能会提示输入root密码"
exit 1
fi
}
startFunc(){
colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
colorEcho ${BLUE} "开始执行 启动RKE集群的操作 ………"
echo ""
colorEcho ${BLUE} "本脚本的运行有一些列的前提依赖,请确定以下的项目都已完成!!!!"
colorEcho ${YELLOW} "----------------------------------------------------------"
colorEcho ${RED} "1. 完成基础环境初始化将rke系统镜像均上传至私有Harbor中"
colorEcho ${RED} "2. 配置并修改好 rke集群的模板文件命名为 cluster.yml !!"
colorEcho ${RED} "3. ……"
colorEcho ${YELLOW} "----------------------------------------------------------"
while true; do
colorEcho ${RED} "请确保您已经将上述的项目完成!!"
read -r -p "请输入yes进行确认脚本才可继续运行" input
case $input in
yes)
colorEcho ${GREEN} "您已确认上述的项目均已完成!!"
colorEcho ${GREEN} "----------------------------------------------------------"
echo ""
colorEcho ${BLUE} "开始执行 RKE集群的启动过程"
echo ""
main
break
;;
*)
echo ""
colorEcho ${RED} "输入有误!!! 请输入 >> yes << 进行确认"
break
colorEcho ${RED} "-----------------------------------------------------"
echo ""
;;
esac
done
}
installRKE(){
colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
colorEcho ${BLUE} "开始下载并安装 RKE 工具 ………"
echo ""
colorEcho ${BLUE} "开始从rancher镜像下载rke工具……"
wget http://rancher-mirror.cnrancher.com/rke/v1.2.6/rke_linux-amd64
if [ -s rke_linux-amd64 ]; then
colorEcho ${GREEN} "rke工具下载完成"
chmod +x rke_linux-amd64
mv ./rke_linux-amd64 /usr/local/bin/rke
colorEcho ${GREEN} "----------------------------------------------------------"
rke --version
colorEcho ${GREEN} "----------------------------------------------------------"
rke config --list-version --all
echo ""
colorEcho ${BLUE} "开始从rancher镜像下载 kubectl 工具……"
wget http://rancher-mirror.cnrancher.com/kubectl/v1.20.4/linux-amd64-v1.20.4-kubectl
chmod +x linux-amd64-v1.20.4-kubectl
mv linux-amd64-v1.20.4-kubectl /usr/local/bin/kubectl
colorEcho ${GREEN} "----------------------------------------------------------"
kubectl version
colorEcho ${GREEN} "----------------------------------------------------------"
else
colorEcho ${RED} "rke工具下载失败脚本无法继续运行请手动下载rke工具"
colorEcho ${RED} "rke工具下载失败脚本无法继续运行请手动下载rke工具"
colorEcho ${RED} "rke工具下载失败脚本无法继续运行请手动下载rke工具"
return 1
fi
}
createRKEInstallerUser(){
colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
colorEcho ${BLUE} "开始创建 rke-installer 用户………"
echo ""
useradd rke-installer
echo "rke-installer
rke-installer
" | passwd rke-installer
#add the rke-installer login user to the docker group
gpasswd -a rke-installer docker
#refresh group membership
newgrp docker
echo ""
if [ -d /home/rke-installer ]; then
colorEcho ${GREEN} "rke-installer 用户创建成功!! "
echo ""
else
mkdir -p /home/rke-installer
chown rke-installer:rke-installer -R /home/rke-installer
usermod -d /home/rke-installer rke-installer
colorEcho ${YELLOW} "检测到 rke-installer 用户已经存在"
fi
if [[ -s cluster.yaml || -s cluster.yml ]]; then
colorEcho ${BLUE} "开始将 cluster.yaml文件复制到 rke-installer目录下…………"
mv cluster.y* /home/rke-installer/cluster.yml
if [ -s /home/rke-installer/cluster.yml ]; then
colorEcho ${BLUE} "cluster.yml文件已经放置完成"
chown rke-installer:rke-installer /home/rke-installer/cluster.yml
else
colorEcho ${RED} "当前目录下未检测到 rke集群的模板文件"
colorEcho ${RED} "程序无法继续,将退出!!"
return 1
fi
else
colorEcho ${RED} "当前目录下未检测到 rke集群的模板文件"
colorEcho ${RED} "程序无法继续,将退出!!"
echo ""
colorEcho ${YELLOW} "--------------------------------------------------"
colorEcho ${RED} "请创建RKE集群的模板文件并命名为 cluster.yml "
colorEcho ${RED} "请创建RKE集群的模板文件并命名为 cluster.yml "
colorEcho ${RED} "请创建RKE集群的模板文件并命名为 cluster.yml "
colorEcho ${YELLOW} "--------------------------------------------------"
return 1
fi
colorEcho ${BLUE} "开始切换当前用户至 rke-installer "
su rke-installer
echo ""
colorEcho ${BLUE} "请检查rke-installer用户能否执行 docker ps 命令!!"
docker ps
colorEcho ${BLUE} "----------------------------------------------------------"
}
generateRKEUserKey(){
colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
colorEcho ${BLUE} "开始创建 rke-installer用户的 ssh key ……"
echo ""
su rke-installer
ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
colorEcho ${GREEN} "--------------------------------------------------------------"
colorEcho ${GREEN} "-----------本机配置完成!-------------"
echo ""
for ip in "${PrivateServerIPs[@]}"; do
colorEcho ${BLUE} "请手动将如下的命令,以 root 权限在主机 ${ip} 上运行"
colorEcho ${BLUE} "请手动将如下的命令,以 root 权限在主机 ${ip} 上运行"
colorEcho ${BLUE} "请手动将如下的命令,以 root 权限在主机 ${ip} 上运行"
colorEcho ${BLUE} "-----------------------------------------------"
echo ""
echo ""
colorEcho ${RED} " 请以 root 角色 运行!!! "
colorEcho ${RED} " 请以 root 角色 运行!!! "
colorEcho ${RED} " 请以 root 角色 运行!!! "
echo ""
colorEcho ${YELLOW} "useradd rke-installer && echo \"rke-installer
rke-installer
\" | passwd rke-installer && gpasswd -a rke-installer docker && newgrp docker && su rke-installer && docker ps "
echo ""
colorEcho ${YELLOW} "clear && ssh-keygen -t rsa -P \"\" -f ~/.ssh/id_rsa && echo \"$(cat ~/.ssh/id_rsa.pub)\" >> ~/.ssh/authorized_keys && echo \"\" && cat ~/.ssh/authorized_keys"
echo ""
echo ""
while true; do
colorEcho ${RED} "请确保您已经将上述的命令在主机${ip}上执行了!!"
read -r -p "请输入yes进行确认脚本才可继续运行" input
case $input in
yes)
colorEcho ${GREEN} "您已确认在主机${ip}上添加了私有的ssh key"
echo ""
break
;;
*)
echo ""
colorEcho ${RED} "输入有误!!! 请输入 >> yes << 进行确认"
colorEcho ${RED} "请在主机${ip}上执行上述命令!!!"
colorEcho ${RED} "否则本脚本的功能会失效!!"
colorEcho ${RED} "-----------------------------------------------------"
echo ""
;;
esac
done
done
}
startRKECLuster(){
colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
colorEcho ${BLUE} "开始 启动 rke集群 "
colorEcho ${BLUE} "开始 启动 rke集群 "
colorEcho ${BLUE} "开始 启动 rke集群 "
echo ""
if [[ $(pwd) == "/home/rke-installer" ]]; then
colorEcho ${BLUE} "检测到当前目录为 /home/rke-installer"
echo ""
colorEcho ${BLUE} "开始执行 RKE 集群的启动过程 "
colorEcho ${BLUE} "-------------------------------------------------------------"
for i in {3..1..-1}; do
colorEcho ${BLUE} "倒计时开始 ->> $i 秒 <<-准备启动RKE上文的日志输出将会消失"
sleep 2
done
clear
rke up
else
colorEcho ${BLUE} "当前目录不为 /home/rke-installer开始跳转目录"
cd /home/rke-installer
startRKECLuster
fi
}
main(){
check_root
installRKE || return $?
generateRKEUserKey || return $?
startRKECLuster || return $?
}
startFunc
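# After a successful run, rke writes kube_config_cluster.yml next to cluster.yml; a quick check (sketch):
export KUBECONFIG=/home/rke-installer/kube_config_cluster.yml
kubectl get nodes -o wide   # every node should report Ready
kubectl get pods -A         # flannel, coredns and the nginx ingress controller should be Running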


@@ -0,0 +1,53 @@
harbor.cdcyy.com.cn/cmii/cmii-uav-platform:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-device:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-cms:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-notice:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-developer:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-mission:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-open-gateway:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-admin-user:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-admin-data:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-logger:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-user:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-brain:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-process:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:v4.0.0
harbor.cdcyy.com.cn/cmii/cmii-srs-operator:v4.0.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:4.1.0


@@ -0,0 +1,62 @@
harbor.cdcyy.com.cn/cmii/cmii-admin-data:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-admin-user:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-open-gateway:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-brain:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:3.3.3
harbor.cdcyy.com.cn/cmii/cmii-uav-cms:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-developer:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-device:4.1.6-0913
harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-integration:4.1.6-0913
harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-logger:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-mission:4.1.6-0913
harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:4.1.6-0913
harbor.cdcyy.com.cn/cmii/cmii-uav-notice:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:4.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-process:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:4.1.6-0913
harbor.cdcyy.com.cn/cmii/cmii-uav-user:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:4.1.6
harbor.cdcyy.com.cn/cmii/cmii-live-operator:v4.0.6
harbor.cdcyy.com.cn/cmii/cmii-rtsp-operator:v4.1.0
harbor.cdcyy.com.cn/cmii/zlm-mediaserver:v1.0.6
harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:v4.0.0
harbor.cdcyy.com.cn/cmii/cmii-srs-operator:v4.0.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:4.1.0


@@ -0,0 +1,44 @@
hostname: 10.20.1.130
http:
port: 8033
harbor_admin_password: V2ryStr@ngPss
database:
password: V2ryStr@ngPss
max_idle_conns: 50
max_open_conns: 1000
data_volume: /var/lib/docker/harbor-data
clair:
updaters_interval: 12
jobservice:
max_job_workers: 10
notification:
webhook_job_max_retry: 10
chart:
absolute_url: disabled
log:
level: error
local:
rotate_count: 50
rotate_size: 200M
location: /var/log/harbor
_version: 2.0.0
proxy:
http_proxy:
https_proxy:
no_proxy:
components:
- core
- jobservice
- clair
- trivy
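# A minimal install sketch for this harbor.yml (the offline-installer bundle name is an assumption; flags vary by Harbor version):
tar xzvf harbor-offline-installer-v2.0.0.tgz
cp harbor.yml harbor/harbor.yml        # the file above
cd harbor && ./install.sh              # add e.g. --with-trivy for the scanner components listed above
docker login -u admin -p V2ryStr@ngPss 10.20.1.130:8033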


@@ -0,0 +1,64 @@
busybox
rancher/backup-restore-operator:v1.0.3
rancher/calico-cni:v3.13.4
rancher/calico-ctl:v3.13.4
rancher/calico-kube-controllers:v3.13.4
rancher/calico-node:v3.13.4
rancher/calico-pod2daemon-flexvol:v3.13.4
rancher/cis-operator:v1.0.3
rancher/cluster-proportional-autoscaler:1.7.1
rancher/configmap-reload:v0.3.0-rancher4
rancher/coredns-coredns:1.6.9
rancher/coreos-etcd:v3.4.3-rancher1
rancher/coreos-flannel:v0.12.0
rancher/coreos-flannel:v0.13.0-rancher1
rancher/coreos-kube-state-metrics:v1.9.7
rancher/coreos-prometheus-config-reloader:v0.39.0
rancher/coreos-prometheus-operator:v0.39.0
rancher/externalip-webhook:v0.1.6
rancher/flannel-cni:v0.3.0-rancher6
rancher/fleet-agent:v0.3.4
rancher/fleet:v0.3.4
rancher/fluentd:v0.1.24
rancher/grafana-grafana:7.1.5
rancher/hyperkube:v1.18.16-rancher1
rancher/jimmidyson-configmap-reload:v0.3.0
rancher/k8s-dns-dnsmasq-nanny:1.15.2
rancher/k8s-dns-kube-dns:1.15.2
rancher/k8s-dns-node-cache:1.15.7
rancher/k8s-dns-sidecar:1.15.2
rancher/klipper-lb:v0.1.2
rancher/kube-api-auth:v0.1.4
rancher/kubectl:v1.18.6
rancher/kubernetes-external-dns:v0.7.3
rancher/library-busybox:1.31.1
rancher/library-busybox:1.32.1
rancher/library-nginx:1.19.2-alpine
rancher/library-traefik:1.7.19
rancher/local-path-provisioner:v0.0.11
rancher/local-path-provisioner:v0.0.14
rancher/local-path-provisioner:v0.0.19
rancher/log-aggregator:v0.1.7
rancher/istio-kubectl:1.5.10
rancher/metrics-server:v0.3.6
rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
rancher/nginx-ingress-controller:nginx-0.35.0-rancher2
rancher/opa-gatekeeper:v3.1.0-beta.7
rancher/openzipkin-zipkin:2.14.2
rancher/pause:3.1
rancher/plugins-docker:18.09
rancher/prom-alertmanager:v0.21.0
rancher/prom-node-exporter:v1.0.1
rancher/prom-prometheus:v2.12.0
rancher/prom-prometheus:v2.18.2
rancher/prometheus-auth:v0.2.1
rancher/rancher-agent:v2.5.7
rancher/rancher-webhook:v0.1.0-beta9
rancher/rancher:v2.5.7
rancher/rke-tools:v0.1.72
rancher/security-scan:v0.1.14
rancher/security-scan:v0.2.2
rancher/shell:v0.1.6
rancher/sonobuoy-sonobuoy:v0.16.3
rancher/system-upgrade-controller:v0.6.2


@@ -0,0 +1,63 @@
busybox
rancher/backup-restore-operator:v1.0.3
rancher/calico-cni:v3.17.2
rancher/calico-ctl:v3.17.2
rancher/calico-kube-controllers:v3.17.2
rancher/calico-node:v3.17.2
rancher/calico-pod2daemon-flexvol:v3.17.2
rancher/cis-operator:v1.0.3
rancher/cluster-proportional-autoscaler:1.7.1
rancher/coredns-coredns:1.8.0
rancher/coreos-etcd:v3.4.14-rancher1
rancher/coreos-kube-state-metrics:v1.9.7
rancher/coreos-prometheus-config-reloader:v0.39.0
rancher/coreos-prometheus-operator:v0.39.0
rancher/externalip-webhook:v0.1.6
rancher/flannel-cni:v0.3.0-rancher6
rancher/coreos-flannel:v0.13.0-rancher1
rancher/fleet-agent:v0.3.4
rancher/fleet:v0.3.4
rancher/fluentd:v0.1.24
rancher/grafana-grafana:7.1.5
rancher/hyperkube:v1.20.4-rancher1
rancher/jimmidyson-configmap-reload:v0.3.0
rancher/k8s-dns-dnsmasq-nanny:1.15.2
rancher/k8s-dns-kube-dns:1.15.2
rancher/k8s-dns-node-cache:1.15.13
rancher/k8s-dns-sidecar:1.15.2
rancher/klipper-lb:v0.1.2
rancher/kube-api-auth:v0.1.4
rancher/kubectl:v1.20.4
rancher/kubernetes-external-dns:v0.7.3
rancher/cluster-proportional-autoscaler:1.8.1
rancher/library-busybox:1.31.1
rancher/library-busybox:1.32.1
rancher/library-nginx:1.19.2-alpine
rancher/library-traefik:1.7.19
rancher/local-path-provisioner:v0.0.11
rancher/local-path-provisioner:v0.0.14
rancher/local-path-provisioner:v0.0.19
rancher/log-aggregator:v0.1.7
rancher/istio-kubectl:1.5.10
rancher/metrics-server:v0.4.1
rancher/configmap-reload:v0.3.0-rancher4
rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
rancher/nginx-ingress-controller:nginx-0.43.0-rancher1
rancher/opa-gatekeeper:v3.1.0-beta.7
rancher/openzipkin-zipkin:2.14.2
rancher/pause:3.2
rancher/plugins-docker:18.09
rancher/prom-alertmanager:v0.21.0
rancher/prom-node-exporter:v1.0.1
rancher/prom-prometheus:v2.18.2
rancher/prometheus-auth:v0.2.1
rancher/rancher-agent:v2.5.7
rancher/rancher-webhook:v0.1.0-beta9
rancher/rancher:v2.5.7
rancher/rke-tools:v0.1.72
rancher/security-scan:v0.1.14
rancher/security-scan:v0.2.2
rancher/shell:v0.1.6
rancher/sonobuoy-sonobuoy:v0.16.3
rancher/system-upgrade-controller:v0.6.2


@@ -0,0 +1,26 @@
docker.io/bitnami/redis:6.2.6-debian-10-r0
docker.io/bitnami/mysql:8.0.26-debian-10-r0
docker.io/bitnami/mysql:8.1.0-debian-11-r42
docker.io/bitnami/bitnami-shell:10-debian-10-r140
docker.io/bitnami/rabbitmq:3.9.12-debian-10-r3
docker.io/bitnami/minio:2021.3.26-debian-10-r0
docker.io/ossrs/srs:v4.0.136
docker.io/emqx/emqx:4.2.12
docker.io/nacos/nacos-server:v2.0.1
docker.io/nacos/nacos-server:v2.1.2
docker.io/mongo:5.0
docker.io/rabbitmq:3.7-management
docker.io/rabbitmq:3.9-management
docker.io/v2fly/v2fly-core:v4.38.3
docker.io/pollyduan/ingress-nginx-controller:v0.44.0
docker.io/jettech/kube-webhook-certgen:v1.5.1
docker.io/minio/minio:RELEASE.2022-03-26T06-49-28Z
docker.io/bitnami/minio:2022.5.4
docker.io/minio/console:v0.15.6
docker.io/minio/operator:v4.4.13
docker.io/kubernetesui/dashboard:v2.0.1
docker.io/kubernetesui/metrics-scraper:v1.0.4
docker.io/ossrs/srs:v4.0-r3
docker.io/nginx:1.21.3
docker.io/minio/console:v0.15.6
docker.io/dyrnq/nfs-subdir-external-provisioner:v4.0.2


@@ -0,0 +1,210 @@
nodes:
- address: 10.20.1.130
user: root
role:
- controlplane
- etcd
- worker
internal_address: 10.20.1.130
labels:
ingress-deploy: true
- address: 10.20.1.133
user: root
role:
- worker
internal_address: 10.20.1.133
- address: 10.20.1.134
user: root
role:
- worker
internal_address: 10.20.1.134
labels:
mysql-deploy: true
authentication:
strategy: x509
sans:
- "10.20.1.130"
private_registries:
- url: 10.20.1.130:8033 # private registry address
user: admin
password: "V2ryStr@ngPss"
is_default: true
##############################################################################
# Defaults to false; if set to true, RKE will not raise an error when an unsupported Docker version is detected
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
#ssh_key_path: /root/.ssh/id_ed25519
ssh_key_path: /root/.ssh/id_rsa
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
etcd:
backup_config:
enabled: false
interval_hours: 72
retention: 3
safe_timestamp: false
timeout: 300
creation: 12h
extra_args:
election-timeout: 5000
heartbeat-interval: 500
gid: 0
retention: 72h
snapshot: false
uid: 0
kube-api:
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-controller
service_cluster_ip_range: 172.24.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
always_pull_images: true
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Enable audit log to stdout
audit-log-path: "-"
# Increase number of delete workers
delete-collection-workers: 3
# Set the level of log output to warning-level
v: 1
kube-controller:
# CIDR pool used to assign IP addresses to pods in the cluster
cluster_cidr: 172.28.0.0/16
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-api
service_cluster_ip_range: 172.24.0.0/16
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Set the level of log output to debug-level
v: 1
# Enable RotateKubeletServerCertificate feature gate
feature-gates: RotateKubeletServerCertificate=true
# Enable TLS Certificates management
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
kubelet:
# Base domain for the cluster
cluster_domain: cluster.local
# IP address for the DNS service endpoint
cluster_dns_server: 172.24.0.10
# Fail if swap is on
fail_swap_on: false
# Raise max-pods above the default of 110
extra_binds:
- "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
extra_args:
max-pods: 122
# Optionally define additional volume binds to a service
scheduler:
extra_args:
# Set the level of log output to warning-level
v: 0
kubeproxy:
extra_args:
# Set the level of log output to warning-level
v: 1
authorization:
mode: rbac
addon_job_timeout: 30
# Specify network plugin-in (canal, calico, flannel, weave, or none)
network:
options:
flannel_backend_type: vxlan
flannel_iface: eth0
flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
plugin: flannel
# Specify DNS provider (coredns or kube-dns)
dns:
provider: coredns
nodelocal: {}
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
maxSurge: 15%
linear_autoscaler_params:
cores_per_replica: 0.34
nodes_per_replica: 4
prevent_single_point_failure: true
min: 2
max: 3
# Specify monitoring provider (metrics-server)
monitoring:
provider: metrics-server
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 8
ingress:
provider: nginx
default_backend: true
http_port: 0
https_port: 0
extra_envs:
- name: TZ
value: Asia/Shanghai
node_selector:
ingress-deploy: true
options:
use-forwarded-headers: "true"
access-log-path: /var/log/nginx/access.log
client-body-timeout: '6000'
compute-full-forwarded-for: 'true'
enable-underscores-in-headers: 'true'
log-format-escape-json: 'true'
log-format-upstream: >-
{ "msec": "$msec", "connection": "$connection", "connection_requests":
"$connection_requests", "pid": "$pid", "request_id": "$request_id",
"request_length": "$request_length", "remote_addr": "$remote_addr",
"remote_user": "$remote_user", "remote_port": "$remote_port",
"http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
"$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
"request_uri": "$request_uri", "args": "$args", "status": "$status",
"body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
"http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
"http_host": "$http_host", "server_name": "$server_name", "request_time":
"$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
"$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
"upstream_response_time": "$upstream_response_time",
"upstream_response_length": "$upstream_response_length",
"upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
"$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
"request_method": "$request_method", "server_protocol": "$server_protocol",
"pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
"geoip_country_code": "$geoip_country_code" }
proxy-body-size: 5120m
proxy-read-timeout: '6000'
proxy-send-timeout: '6000'


@@ -0,0 +1,69 @@
# Download all the offline files
wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/4.1.6
wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/123
wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/v4.1.6/rancher-1.20.4-image.tar.gz
wget https://oss.demo.uavcmlc.com:18000/cmlc-installation/v4.1.6/rke
# Copy files to the other nodes in batch
ip_list=(10.20.1.133 10.20.1.134 10.20.1.132)
for ip in "${ip_list[@]}"; do
echo "yes
yes
" | scp /etc/docker/daemon.json root@${ip}:/etc/docker/daemon.json
ssh root@${ip} "systemctl restart docker"
done
ip_list=(10.20.1.133 10.20.1.134 10.20.1.132)
for ip in "${ip_list[@]}"; do
scp /etc/docker/daemon.json root@${ip}:/etc/docker/daemon.json
# scp /etc/ssh/sshd_config root@${ip}:/etc/ssh/sshd_config
ssh root@${ip} "systemctl restart docker"
# ssh root@${ip} "systemctl restart sshd"
done
vim /etc/docker/daemon.json
{
"insecure-registries" : ["10.20.1.130:8033"]
}
systemctl restart docker
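# Quick check (sketch) that a node can reach Harbor through the insecure-registries entry, assuming the RKE images were already pushed:
docker login -u admin -p V2ryStr@ngPss 10.20.1.130:8033
docker pull 10.20.1.130:8033/rancher/rke-tools:v0.1.72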
# Generate an ed25519 ssh key
ssh-keygen -t ed25519 -f .ssh/id_ed25519 -C "m@github"
echo $(cat .ssh/id_ed25519.pub)
echo "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHXDzet+Z2/AmrzIZpPviz7Z9AMxLWiJUOwtj/3NPauk m@github" >>.ssh/authorized_keys
# Change the interface calico-node uses for IP detection
kubectl -n kube-system edit daemonset calico-node
env:
- name: FELIX_INTERFACEPREFIX
value: "eth0"
docker pull rancher/coreos-flannel:v0.13.0-rancher1
docker tag rancher/coreos-flannel:v0.13.0-rancher1 10.20.1.130:8033/rancher/coreos-flannel:v0.13.0-rancher1
docker push 10.20.1.130:8033/rancher/coreos-flannel:v0.13.0-rancher1
./mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 < /root/install/mysql/all_tables_demo_4.1.6.sql
NEW_UPDATED_SQL_FILE_FOLDER=/root/install/mysql/master_data_4.1.6
NEW_UPDATED_SQL_FILE_FOLDER=/root/install/mysql/manual_script
for sql_file in $(ls ${NEW_UPDATED_SQL_FILE_FOLDER}); do
echo "current file is ${sql_file}"
./mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 <"$NEW_UPDATED_SQL_FILE_FOLDER/${sql_file}"
echo "------------------"
echo ""
done
./mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306


@@ -0,0 +1,53 @@
Day 1
1. Kernel configuration changes and tuning, disabling swap, SELinux, etc. (server environment initialization)
2. Install common tools (curl, wget, vim, ping, telnet, ...), configure passwordless SSH login, etc.
3. Install a time-synchronization server and synchronize the clocks of all servers
Day 2
1. Download and install Docker offline; adjust and tune the Docker configuration
2. Install docker-compose, install the Harbor server, configure the servers to pull images from it without prompts
3. Download and install GlusterFS, Heketi and NFS offline; configure and install the underlying storage services
Day 3
1. Download the Kubernetes installation files offline
2. Upload the Kubernetes offline installation packages to all server nodes
3. Adjust the corresponding Kubernetes installation and deployment configuration
4. Configure and install the apiserver, controller-manager, scheduler and kube-proxy service files; install the master and node components and verify the Kubernetes cluster installation
Day 4
1. Install the Calico network plugin
2. Verify the Kubernetes cluster: network, nodes, images, etc.
3. Download the offline images and install the Ingress component of the service-exposure layer
Day 5
1. Download all images required by the middleware; configure, install and deploy the middleware
2. Install, configure and verify the MySQL database component
3. Install, configure and verify MongoDB
4. Install, configure and verify Redis
5. Install, configure and verify EMQX, the UAV flight-control communication component
6. Install, configure and verify the RabbitMQ message-queue component
7. Install, configure and verify the Nacos microservice registry component
Day 6
1. Configure the storage class, create the storage volumes, install the business-layer object storage service (MinIO cluster)
2. Install the video streaming service SRS-Cluster
3. Make sure the stream push/pull services run correctly
Day 7
1. Download offline and upload all China Mobile Lingyun (CMLC) platform business images
2. Adapt the Lingyun platform business configuration to the environment and upload it
3. Install the Lingyun base initialization data and initialize the operations-platform data
Day 8
1. Deploy all business microservice components and make sure every microservice runs correctly
2. Bring the platform up to a preliminary running state (not yet usable)
Day 9
1. Deploy the GDR transcoding server and complete its configuration
2. Make sure the GDR service runs correctly
3. Start preliminary integration testing
Day 10
1. Preliminary verification of the Lingyun platform installation and deployment
2. Joint testing of the platform business components
3. Make sure the platform's core functions work correctly


@@ -0,0 +1,24 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
.vscode/
charts/frontend-app/templates/traefik.yaml
charts/all-middleware/charts/rabbitmq-backup


@@ -0,0 +1,13 @@
version: '2'
services:
minio1:
ports:
- "9000:9000"
- "9001:9001"
image: '10.20.1.130:8033/cmii/minio:2022.5.4'
environment:
- MINIO_ROOT_USER=cmii
- MINIO_ROOT_PASSWORD=B#923fC7mk
volumes:
- /var/lib/docker/minio-pv/:/data
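# Bringing the standalone MinIO up with Compose and checking it (sketch; assumes this file is saved as docker-compose.yml):
docker-compose up -d
docker-compose ps                                   # minio1 should be Up
curl -I http://127.0.0.1:9000/minio/health/live     # liveness endpoint should return 200
# the web console listens on port 9001 with the MINIO_ROOT_USER / MINIO_ROOT_PASSWORD above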


@@ -0,0 +1,114 @@
# Update the NFS server IP and NFS path
# Update the private image registry address
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # set the namespace for your environment; same below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # must match the namespace used in the RBAC objects
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 10.20.1.130:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage # provisioner name; must match the provisioner in nfs-StorageClass.yaml
- name: NFS_SERVER
value: 10.20.1.130 # NFS server IP address
- name: NFS_PATH
value: /var/lib/docker/nfsdata # NFS export path
volumes:
- name: nfs-client-root
nfs:
server: 10.20.1.130 # NFS server IP address
path: /var/lib/docker/nfsdata # NFS export path


@@ -0,0 +1,40 @@
# Update the private image registry address
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
containers:
- name: test-pod
image: 10.20.1.130:8033/rancher/busybox
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: test-claim # must match the PVC name
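# Applying and verifying the storage class (sketch; the manifest file names are assumptions):
kubectl apply -f nfs-provisioner.yaml
kubectl apply -f nfs-test-claim.yaml
kubectl get storageclass nfs-prod-distribute
kubectl get pvc test-claim          # should become Bound once the provisioner is healthy
ls /var/lib/docker/nfsdata          # on the NFS server: the PVC directory should contain NFS-CREATE-SUCCESS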


@@ -0,0 +1,9 @@
server {
listen 39000;
proxy_pass 10.20.1.132:9000;
}
server {
listen 31935;
proxy_pass 10.20.1.133:30935;
}


@@ -0,0 +1,28 @@
kubectl patch daemonset nginx-ingress-controller -n ingress-nginx --patch '{"spec":{"template":{"spec":{"hostNetwork": false}}}}'
kind: Service
apiVersion: v1
metadata:
name: ingress-nginx-service
namespace: ingress-nginx
spec:
ports:
- name: http
protocol: TCP
port: 80
targetPort: 80
nodePort: 30500
- name: https
protocol: TCP
port: 443
targetPort: 443
nodePort: 31500
selector:
app: ingress-nginx
clusterIP: 10.74.70.70
clusterIPs:
- 10.74.70.70
type: NodePort
sessionAffinity: None
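# Applying and checking the NodePort exposure (sketch; the manifest file name is an assumption):
kubectl apply -f ingress-nginx-service.yaml
kubectl -n ingress-nginx get svc ingress-nginx-service   # expect 80:30500/TCP and 443:31500/TCP
curl -I http://10.20.1.130:30500                         # a 404 from the default backend means the path works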


@@ -0,0 +1,43 @@
user root;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
use epoll;
worker_connections 65535;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
server_tokens off;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
send_timeout 1200;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 600;
types_hash_max_size 2048;
client_max_body_size 2048m;
client_body_buffer_size 2048m;
underscores_in_headers on;
proxy_send_timeout 600;
proxy_read_timeout 600;
proxy_connect_timeout 600;
proxy_buffer_size 128k;
proxy_buffers 8 256k;
include /etc/nginx/conf.d/*.conf;
}
stream {
include /etc/nginx/conf.d/stream/*.conf;
}


@@ -0,0 +1,500 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: zhbf
labels:
app.kubernetes.io/managed-by: Helm
cmii.app: live-srs
cmii.type: midware
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 30935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://192.168.233.100:8888;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: zhbf
labels:
app.kubernetes.io/managed-by: Helm
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
nodePort: 30935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30557
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: zhbf
labels:
app.kubernetes.io/managed-by: Helm
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: zhbf
labels:
app.kubernetes.io/managed-by: Helm
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: zhbf
labels:
app.kubernetes.io/managed-by: Helm
cmii.app: live-srs
cmii.type: midware
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
creationTimestamp: null
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: '192.168.233.100:8033/cmii/srs:v4.0-r3'
ports:
- name: srs-rtmp
containerPort: 30935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 192.168.233.100
resources:
limits:
cpu: 1200m
memory: 6Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: zhbf/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: zhbf/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: '192.168.233.100:8033/cmii/cmii-srs-oss-adaptor:v4.0.6'
env:
- name: OSS_ENDPOINT
value: 'http://192.168.233.100:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 1200m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: zhbf/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- demo
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: zhbf
labels:
app.kubernetes.io/managed-by: Helm
cmii.app: live-engine
cmii.type: midware
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
creationTimestamp: null
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: operator
image: '192.168.233.100:8033/cmii/cmii-live-operator:v4.0.6'
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/ping
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/ping
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- demo
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: zhbf
labels:
app.kubernetes.io/managed-by: Helm
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: zhbf
labels:
app.kubernetes.io/managed-by: Helm
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: zhbf
labels:
app.kubernetes.io/managed-by: Helm
cmii.app: live-engine
cmii.type: midware
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 4.0.6
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: developer
password: N@cos14Good
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 4.1.6
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 4.1.6
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://192.168.233.100:30935'
rtsp: 'rtsp://192.168.233.100:30554'
srt: 'srt://192.168.233.100:30556'
flv: 'http://192.168.233.100:30500'
hls: 'http://192.168.233.100:30500'
rtc: 'webrtc://192.168.233.100:30090'
replay: 'https://192.168.233.100:30333'
minio:
endpoint: http://192.168.233.100:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls
---
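# Quick push/pull test once the SRS pods are Running (sketch; assumes ffmpeg and a local sample.mp4):
ffmpeg -re -i sample.mp4 -c copy -f flv rtmp://192.168.233.100:30935/live/test   # publish over the RTMP NodePort
ffplay http://192.168.233.100:30500/live/test.flv                                # HTTP-FLV playback via the flv endpoint above
curl http://192.168.233.100:30557/api/v1/streams                                 # SRS HTTP API exposed on NodePort 30557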


@@ -0,0 +1,87 @@
# Replace the namespace
---
# Source: outside-deploy/charts/all-persistence-volume-claims/templates/pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: jxyd
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 4.1.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
---
# Source: outside-deploy/charts/all-persistence-volume-claims/templates/pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: jxyd
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 4.1.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
# Source: outside-deploy/charts/all-persistence-volume-claims/templates/pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: jxyd
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 4.1.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
# Source: outside-deploy/charts/all-persistence-volume-claims/templates/pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: jxyd
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 4.1.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi


@@ -0,0 +1,418 @@
# Replace the namespace
# Update the private image registry address
---
# Source: outside-deploy/charts/mysql-db/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: jxyd
labels:
app.kubernetes.io/name: mysql-db
helm.sh/chart: mysql-db-8.8.1
app.kubernetes.io/release: xmyd
app.kubernetes.io/managed-by: mysql-db
annotations:
secrets:
- name: helm-mysql
---
# Source: outside-deploy/charts/mysql-db/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: jxyd
labels:
app.kubernetes.io/name: mysql-db
helm.sh/chart: mysql-db-8.8.1
app.kubernetes.io/release: xmyd
app.kubernetes.io/managed-by: mysql-db
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
# Source: outside-deploy/charts/mysql-db/templates/primary/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: jxyd
labels:
app.kubernetes.io/name: mysql-db
helm.sh/chart: mysql-db-8.8.1
app.kubernetes.io/release: xmyd
app.kubernetes.io/managed-by: mysql-db
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
innodb_log_files_in_group = 4
innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
max_allowed_packet=32M
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
# Source: outside-deploy/charts/mysql-db/templates/primary/initialization-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: jxyd
labels:
app.kubernetes.io/name: mysql-db
helm.sh/chart: mysql-db-8.8.1
app.kubernetes.io/release: xmyd
app.kubernetes.io/managed-by: mysql-db
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all on *.* to zyly_qc@'%';
create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
grant all on *.* to k8s_admin@'%';
create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all on *.* to audit_dba@'%';
create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
create user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION CLIENT on *.* to monitor@'%';
flush privileges;
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: jxyd
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: mysql-db
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: xmyd
cmii.app: mysql
cmii.type: middleware
helm.sh/chart: mysql-db-8.8.1
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: xmyd
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
# Source: outside-deploy/charts/mysql-db/templates/primary/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: jxyd
labels:
app.kubernetes.io/name: mysql-db
helm.sh/chart: mysql-db-8.8.1
app.kubernetes.io/release: xmyd
app.kubernetes.io/managed-by: mysql-db
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations:
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: xmyd
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
# Source: outside-deploy/charts/mysql-db/templates/primary/svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: jxyd
labels:
app.kubernetes.io/name: mysql-db
helm.sh/chart: mysql-db-8.8.1
app.kubernetes.io/release: xmyd
app.kubernetes.io/managed-by: mysql-db
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations:
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: xmyd
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
# Source: outside-deploy/charts/mysql-db/templates/primary/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: jxyd
labels:
app.kubernetes.io/name: mysql-db
helm.sh/chart: mysql-db-8.8.1
app.kubernetes.io/release: xmyd
app.kubernetes.io/managed-by: mysql-db
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: xmyd
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
helm.sh/chart: mysql-db-8.8.1
app.kubernetes.io/release: xmyd
app.kubernetes.io/managed-by: mysql-db
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
serviceAccountName: helm-mysql
affinity: {}
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: "10.20.1.130:8033/cmii/bitnami-shell:10-debian-10-r140"
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: "10.20.1.130:8033/cmii/mysql:8.1.0-debian-11-r42"
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: {}
requests: {}
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv

View File

@@ -0,0 +1,3 @@
# The nacos username is developer and the password is N@cos14Good
INSERT INTO `cmii_nacos_config`.`users`(`username`, `password`, `enabled`) VALUES ('developer', '$2a$10$4wDsWyBohXZ2aoTazyAkDOBvspbJijS30skdrd0kGzp1aRgxNLfqa', 1);
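-- Sanity check (a sketch; the NodePort 38989 comes from the helm-nacos Service defined in this deployment):
-- once Nacos is up, log in to the console at http://<node-ip>:38989/nacos with developer / N@cos14Good
-- to confirm that this row took effect.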

View File

@@ -0,0 +1,820 @@
# Replace the namespace
# https://www.emqx.io/docs/en/v4.2/configuration/configuration.html#cluster
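# A minimal replacement sketch (assumes this manifest is saved as middleware.yaml; the file name and right-hand values are placeholders):
#   sed -i 's/namespace: jxyd/namespace: <target-namespace>/g' middleware.yaml
#   sed -i 's#10.20.1.130:8033#<harbor-registry-address>#g'    middleware.yaml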
---
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/rbac-cluster.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: jxyd
---
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/configmap-cluster.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: jxyd
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "3.1.0"
data:
EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc:443
EMQX_NAME: helm-emqxs
EMQX_CLUSTER__DISCOVERY: k8s
EMQX_CLUSTER__K8S__SERVICE_NAME: helm-emqxs-headless
EMQX_CLUSTER__K8S__APP_NAME: helm-emqxs
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
EMQX_CLUSTER__K8S__namespace: jxyd
EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/configmap-cluster.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: jxyd
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "3.1.0"
data:
emqx_auth_username.conf: |-
auth.user.1.username = cmlc
auth.user.1.password = odD8#Ve7.B
auth.user.password_hash = sha256
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
{emqx_auth_username,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/statefulset-cluster.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: jxyd
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "3.1.0"
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "3.1.0"
spec:
affinity: {}
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: "10.20.1.130:8033/cmii/emqx:4.2.12"
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: {}
readinessProbe:
httpGet:
path: /status
port: 8081
initialDelaySeconds: 5
periodSeconds: 5
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf"
subPath: emqx_auth_username.conf
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/acl.conf"
subPath: "acl.conf"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_username.conf
path: emqx_auth_username.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
# Source: outside-deploy/charts/all-middleware/charts/nacos/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: jxyd
labels:
cmii.app: helm-nacos
cmii.type: middleware
helm.sh/chart: nacos-1.1.1
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "3.1.0"
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/configuration.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/rbac-cluster.yaml
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: jxyd
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/role.yaml
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/rbac-cluster.yaml
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: jxyd
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: jxyd
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/rolebinding.yaml
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/svc-cluster.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: jxyd
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "3.1.0"
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: jxyd
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "3.1.0"
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
---
# Source: outside-deploy/charts/all-middleware/charts/mongo/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: jxyd
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "3.1.0"
spec:
type: ClusterIP
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
---
# Source: outside-deploy/charts/all-middleware/charts/nacos/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: jxyd
labels:
cmii.app: helm-nacos
cmii.type: middleware
helm.sh/chart: nacos-1.1.1
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "3.1.0"
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38989
- port: 9848
name: server12
targetPort: 9848
nodePort: 38912
- port: 9849
name: server23
targetPort: 9849
nodePort: 38923
---
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
targetPort: stats
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: jxyd
publishNotReadyAddresses: true
---
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 35675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: jxyd
---
# Source: outside-deploy/charts/all-middleware/charts/mongo/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: jxyd
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "3.1.0"
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "3.1.0"
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: {}
containers:
- name: helm-mongo
image: "10.20.1.130:8033/cmii/mongo:5.0"
resources: {}
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
# Source: outside-deploy/charts/all-middleware/charts/nacos/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: jxyd
labels:
cmii.app: helm-nacos
cmii.type: middleware
helm.sh/chart: nacos-1.1.1
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "3.1.0"
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
helm.sh/chart: nacos-1.1.1
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "3.1.0"
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity: {}
containers:
- name: nacos-server
image: "10.20.1.130:8033/cmii/nacos-server:v2.1.2"
ports:
- containerPort: 8848
name: dashboard
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: jxyd
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
serviceAccountName: helm-rabbitmq
affinity: {}
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: "10.20.1.130:8033/cmii/bitnami-shell:10-debian-10-r140"
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: {}
requests: {}
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: "10.20.1.130:8033/cmii/rabbitmq:3.9.12-debian-10-r3"
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: {}
requests: {}
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq

View File

@@ -0,0 +1,590 @@
# Replace the namespace
# Replace the image registry address
# Replace jxyd
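# A minimal replacement sketch (assumes this manifest is saved as redis.yaml; the file name and right-hand values are placeholders):
#   sed -i 's/namespace: jxyd/namespace: <target-namespace>/g' redis.yaml
#   sed -i 's#10.20.1.130:8033#<harbor-registry-address>#g'    redis.yaml
#   sed -i 's/jxyd/<target-release>/g'                         redis.yaml   # also rewrites the release labels and the replica's REDIS_MASTER_HOST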
---
# Source: outside-deploy/charts/redis-db/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
helm.sh/chart: redis-db-15.4.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: redis-db
---
# Source: outside-deploy/charts/redis-db/templates/secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
helm.sh/chart: redis-db-15.4.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: redis-db
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
# Source: outside-deploy/charts/redis-db/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
helm.sh/chart: redis-db-15.4.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: redis-db
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
helm.sh/chart: redis-db-15.4.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: redis-db
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
helm.sh/chart: redis-db-15.4.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: redis-db
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
helm.sh/chart: redis-db-15.4.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: redis-db
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: jxyd
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
helm.sh/chart: redis-db-15.4.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: redis-db
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: jxyd
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
helm.sh/chart: redis-db-15.4.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: redis-db
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: jxyd
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
helm.sh/chart: redis-db-15.4.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: redis-db
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: jxyd
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
helm.sh/chart: redis-db-15.4.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: redis-db
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: {}
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: "10.20.1.130:8033/cmii/redis:6.2.6-debian-10-r0"
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "1"
memory: 4Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: tmp
emptyDir: {}
- name: redis-data
emptyDir: {}
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
helm.sh/chart: redis-db-15.4.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: redis-db
app.kubernetes.io/component: replica
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: jxyd
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
helm.sh/chart: redis-db-15.4.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: redis-db
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: "10.20.1.130:8033/cmii/redis:6.2.6-debian-10-r0"
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.jxyd.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "1"
memory: 4Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: redis-data
emptyDir: {}

View File

@@ -0,0 +1,24 @@
kind: Service
apiVersion: v1
metadata:
name: ingress-nginx-service
namespace: ingress-nginx
labels:
app: ingress-nginx
spec:
ports:
- name: http
protocol: TCP
port: 80
targetPort: 80
nodePort: 30500
- name: https
protocol: TCP
port: 443
targetPort: 443
nodePort: 31500
selector:
app: ingress-nginx
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,28 @@
- name: rtmp
protocol: TCP
port: 31935
targetPort: 30935
nodePort: 31935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30550
targetPort: 30550
nodePort: 30550
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30556
rtmp: 'rtmp://36.138.132.240:31935'
rtsp: 'rtsp://36.138.132.240:30554'
srt: 'srt://36.138.132.240:30550'
flv: 'http://36.138.132.240:36500'
hls: 'http://36.138.132.240:33500'
rtc: 'webrtc://36.138.132.240:30556'
replay: 'https://36.138.132.240:30333'

View File

@@ -0,0 +1,495 @@
# Modify the namespace
# Modify the Harbor private registry address
# Modify the RTMP push/pull stream addresses
# Modify the MinIO address 10.20.1.132:9000
# Modify the MySQL password and MySQL address
# CANDIDATE is the RTMP public network address
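# A minimal replacement sketch (assumes this manifest is saved as live-srs.yaml; the file name and all right-hand values are placeholders):
#   sed -i 's/namespace: jxyd/namespace: <target-namespace>/g' live-srs.yaml
#   sed -i 's#10.20.1.130:8033#<harbor-registry-address>#g'    live-srs.yaml
#   sed -i 's#36.138.132.240#<public-candidate-address>#g'     live-srs.yaml
#   sed -i 's#10.20.1.132:9000#<minio-endpoint>#g'             live-srs.yaml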
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: jxyd
labels:
app.kubernetes.io/managed-by: Helm
cmii.app: live-srs
cmii.type: midware
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 30935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://36.138.132.240:80;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: jxyd
labels:
app.kubernetes.io/managed-by: Helm
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
nodePort: 30935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30557
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: jxyd
labels:
app.kubernetes.io/managed-by: Helm
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: jxyd
labels:
app.kubernetes.io/managed-by: Helm
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: jxyd
labels:
app.kubernetes.io/managed-by: Helm
cmii.app: live-srs
cmii.type: midware
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
creationTimestamp: null
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: '10.20.1.130:8033/cmii/srs:v4.0-r3'
ports:
- name: srs-rtmp
containerPort: 30935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 36.138.132.240
resources:
limits:
cpu: 1200m
memory: 6Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: zhbf/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: zhbf/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: '10.20.1.130:8033/cmii/cmii-srs-oss-adaptor:v4.0.6'
env:
- name: OSS_ENDPOINT
value: 'http://10.20.1.132:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 1200m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: zhbf/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: jxyd
labels:
app.kubernetes.io/managed-by: Helm
cmii.app: live-engine
cmii.type: midware
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
creationTimestamp: null
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: operator
image: '10.20.1.130:8033/cmii/cmii-live-operator:v4.0.6'
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/ping
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/ping
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: jxyd
labels:
app.kubernetes.io/managed-by: Helm
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: jxyd
labels:
app.kubernetes.io/managed-by: Helm
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: jxyd
labels:
app.kubernetes.io/managed-by: Helm
cmii.app: live-engine
cmii.type: midware
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 4.0.6
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: developer
password: N@cos14Good
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 4.1.6
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 4.1.6
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://36.138.132.240:30935'
rtsp: 'rtsp://36.138.132.240:30554'
srt: 'srt://36.138.132.240:30556'
flv: 'http://36.138.132.240:30500'
hls: 'http://36.138.132.240:30500'
rtc: 'webrtc://36.138.132.240:30557'
replay: 'https://36.138.132.240:30333'
minio:
endpoint: http://10.20.1.132:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls
---

View File

@@ -0,0 +1,31 @@
# Replace the namespace
# Replace the actual MinIO address and port
# Modify the actual RabbitMQ address and port (they must be exposed externally)
export tenant_name=jxyd
./mc alias set ${tenant_name} http://10.20.1.132:9000 cmii B#923fC7mk
./mc mb ${tenant_name}/jadenq ${tenant_name}/tus ${tenant_name}/thumbnail ${tenant_name}/pub-cms ${tenant_name}/live-srs-hls/ ${tenant_name}/mission/ ${tenant_name}/surveillance ${tenant_name}/playback ${tenant_name}/tower ${tenant_name}/modelprocess ${tenant_name}/srs-hls ${tenant_name}/live-cluster-hls
./mc alias set demo https://oss.demo.uavcmlc.com:18000 cmii B#923fC7mk
./mc cp -r demo/jadenq/scenariomock/xg/ ${tenant_name}/jadenq/scenariomock/xg/
./mc cp -r demo/jadenq/application/file/中移凌云使用手册.pdf ${tenant_name}/jadenq/application/file/中移凌云使用手册.pdf
./mc cp -r demo/jadenq/defimage/def.jpg ${tenant_name}/jadenq/defimage/def.jpg
./mc cp -r demo/pub-cms/application/img/ ${tenant_name}/pub-cms/application/img/
./mc admin config set ${tenant_name} notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@10.20.1.130:35672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"
./mc admin service restart ${tenant_name}
./mc event add ${tenant_name}/mission arn:minio:sqs::1:amqp --event put
./mc event add ${tenant_name}/modelprocess arn:minio:sqs::1:amqp --event put
./mc event add ${tenant_name}/live-srs-hls arn:minio:sqs::1:amqp --event put
./mc event add ${tenant_name}/live-cluster-hls arn:minio:sqs::1:amqp --event put
./mc event add ${tenant_name}/playback arn:minio:sqs::1:amqp --event put
./mc event add ${tenant_name}/tus arn:minio:sqs::1:amqp --event delete
./mc ilm add --expiry-days "1" ${tenant_name}/tus
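# Optional sanity checks (a sketch, using the alias and buckets created above):
# ./mc admin info ${tenant_name}              # confirm the MinIO deployment is reachable
# ./mc event list ${tenant_name}/mission      # confirm the AMQP notification is registered
# ./mc ilm ls ${tenant_name}/tus              # confirm the 1-day expiry rule on the tus bucket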

View File

@@ -0,0 +1,397 @@
# Modify the MySQL password and MySQL address
# Modify the MinIO address and password
# Modify the public access address of the deployment environment
# Replace the namespace
# Replace the Harbor private registry address
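# A minimal replacement sketch (assumes this manifest is saved as srs-cluster.yaml; the file name and right-hand values are placeholders):
#   sed -i 's/namespace: zhbf/namespace: <target-namespace>/g'  srs-cluster.yaml
#   sed -i 's#192.168.233.100:8033#<harbor-registry-address>#g' srs-cluster.yaml
#   sed -i 's#192.168.233.100#<public-access-address>#g'        srs-cluster.yaml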
---
# Source: outside-deploy/charts/srs-cluster/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-srs-cm
namespace: zhbf
labels:
cmii.app: video-live-srs
cmii.type: middleware
helm.sh/chart: srs-cluster-2.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "2.0.0"
data:
srs.rtc.conf: |-
listen 30935;
max_connections 4096;
srs_log_tank console;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 30985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 30080;
dir /home/hls;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-srs-op-svc:8080/hooks/on_publish;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 0;
mw_msgs 0;
}
publish {
firstpkt_timeout 4000;
normal_timeout 2000;
mr off;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
}
}
srs.op.conf: |-
debug: false
server:
port: 8080
spring:
application:
name: cmii-srs-operator
platform:
info:
name: cmii-live-srs-operator
description: cmii-live-srs-operator
version: 1.2.0
scanPackage: com.cmii.live
datasource:
type: com.alibaba.druid.pool.DruidDataSource
url: jdbc:mysql://helm-mysql:3306/cmii_live_srs_op?characterEncoding=utf8&useSSL=false&serverTimezone=GMT%2B8
username: k8s_admin
password: fP#UaH6qQ3)8
driver-class-name: com.mysql.cj.jdbc.Driver
boot:
admin:
client:
enabled: false
url: http://127.0.0.1:8888
instance:
service-url: http://127.0.0.1:8080
druid:
mysql:
usePingMethod: false
management:
endpoints:
enabled-by-default: true
web:
exposure:
include: '*'
endpoint:
health:
show-details: always
live:
srs:
rtmp-base: "rtmp://192.168.233.100:30935"
rtsp-base: "rtsp://192.168.233.100:30554"
srt-base: "srt://192.168.233.100:23333"
flv-base: "http://192.168.233.100:30500"
rtc-base: "webrtc://192.168.233.100:30500"
api-base: "http://helm-srs-rtc-svc:30985"
hls:
max-ts: 200
interval-ms: 6000
minio:
endpoint: http://192.168.233.100:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: srs-hls
hook:
on-publish:
- http://cmii-uav-cloud-live:8080/client/live/stream/reportVideoEvent
sync:
onStart: false
pool:
core: 8
max: 12
queue: 0
keepalive: 20
interval:
sync: 150
elect: 8
keepalive: 20
heartbeat: 8
logging:
level:
com.cmii.live.srs.mapper: info
---
# Source: outside-deploy/charts/srs-cluster/templates/service.yaml
---
apiVersion: v1
kind: Service
metadata:
name: srs-rtc
namespace: zhbf
spec:
type: ClusterIP
clusterIP: None
selector:
srs-role: webrtc
ports:
- name: srsrtc-rtmp
port: 30935
targetPort: 30935
- name: srsrtc-hls
port: 30080
targetPort: 30080
---
# Source: outside-deploy/charts/srs-cluster/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-srs-rtc-svc
namespace: zhbf
spec:
type: NodePort
selector:
srs-role: webrtc
ports:
- name: srs-rtmp
port: 30935
targetPort: 30935
nodePort: 30935
- name: srs-api
port: 30985
targetPort: 30985
nodePort: 30985
- name: srs-rtc
port: 30090
targetPort: 30090
nodePort: 30090
protocol: UDP
- name: srs-flv
port: 30080
targetPort: 30080
nodePort: 30080
---
# Source: outside-deploy/charts/srs-cluster/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-srs-op-svc
namespace: zhbf
spec:
type: ClusterIP
selector:
srs-role: op
ports:
- port: 8080
targetPort: 8080
---
# Source: outside-deploy/charts/srs-cluster/templates/operator-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: helm-srs-op
namespace: zhbf
labels:
srs-role: op
cmii.app: video-live-srs
cmii.type: middleware
helm.sh/chart: srs-cluster-2.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "2.0.0"
spec:
replicas: 1
selector:
matchLabels:
srs-role: op
template:
metadata:
labels:
srs-role: op
cmii.app: video-live-srs
cmii.type: middleware
helm.sh/chart: srs-cluster-2.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "2.0.0"
spec:
imagePullSecrets:
- name: harborsecret
affinity:
{}
containers:
- name: operator
image: "192.168.233.100:8033/cmii/cmii-srs-operator:v4.0.0"
imagePullPolicy: Always
resources:
limits:
memory: 8Gi
cpu: 4000m
requests:
memory: 256Mi
cpu: 100m
ports:
- name: operator
containerPort: 8080
protocol: TCP
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/application.yaml
subPath: application.yaml
volumes:
- name: srs-conf-file
configMap:
name: "helm-srs-cm"
items:
- key: "srs.op.conf"
path: "application.yaml"
---
# Source: outside-deploy/charts/srs-cluster/templates/webrtc-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-srs-rtc
namespace: zhbf
labels:
srs-role: webrtc
cmii.app: video-live-srs
cmii.type: middleware
helm.sh/chart: srs-cluster-2.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "2.0.0"
spec:
  serviceName: srs-rtc
replicas: 1
selector:
matchLabels:
srs-role: webrtc
template:
metadata:
labels:
srs-role: webrtc
cmii.app: video-live-srs
cmii.type: middleware
helm.sh/chart: srs-cluster-2.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "2.0.0"
spec:
imagePullSecrets:
- name: harborsecret
affinity:
containers:
- name: helm-srs
image: "192.168.233.100:8033/cmii/srs:v4.0-r4"
resources:
limits:
memory: 8Gi
cpu: 4
requests:
memory: 256Mi
cpu: 100m
ports:
- name: srs-rtmp
containerPort: 30935
protocol: TCP
- name: srs-api
containerPort: 30985
protocol: TCP
- name: srs-flv
containerPort: 30080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
env:
- name: CANDIDATE
value: "192.168.233.100"
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: "default/helm-srs/dvr"
- name: srs-vol
mountPath: /home/hls
subPath: "default/helm-srs/hls"
- name: oss-adaptor
image: "192.168.233.100:8033/cmii/cmii-srs-oss-adaptor:v4.0.0"
imagePullPolicy: Always
resources:
limits:
memory: 8Gi
cpu: 4
requests:
memory: 256Mi
cpu: 100m
env:
- name: OSS_ENDPOINT
value: http://192.168.233.100:9000
- name: OSS_AK
value: cmii
- name: OSS_SK
value: B#923fC7mk
- name: OSS_BUCKET
value: srs-hls
- name: SRS_OP
value: "http://helm-srs-op-svc:8080"
- name: MYSQL_ENDPOINT
value: helm-mysql:3306
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: default/helm-srs/hls
volumes:
- name: srs-conf-file
configMap:
name: "helm-srs-cm"
items:
- key: "srs.rtc.conf"
path: "docker.conf"
- name: srs-vol
emptyDir:
sizeLimit: 10Gi

View File

@@ -0,0 +1,38 @@
# nvidia-docker must be installed as an add-on component
# https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
# Both install paths are listed below: apt for Debian/Ubuntu nodes and yum for RHEL/CentOS nodes; run whichever matches the node OS.
distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \
&& curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
&& curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | \
sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
sudo apt-get update
apt-get install -y nvidia-container-toolkit
curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | \
sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo
sudo yum install -y nvidia-container-toolkit
nvidia-ctk runtime configure --runtime=docker
systemctl restart docker
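# Optional check (a sketch; the CUDA image tag is only an example, use any image available locally or in your registry):
# docker info | grep -i runtimes          # the runtime list should now include "nvidia"
# docker run --rm --gpus all nvidia/cuda:12.2.0-base-ubuntu22.04 nvidia-smi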
export gdr_server=(gdr-hard-decoder-server_v1.0.1-release.tar.gz pack_gdr_release_0724_x64.tar.gz pack_ts2mp4_x64-0724.tar.gz ts2mp4_docker_image_v1.tar.gz)
for gdr_file in ${gdr_server[*]} ; do
echo ""
echo "current gdr file is => ${gdr_file}"
wget --no-check-certificate https://oss.demo.uavcmlc.com:18000/cmlc-installation/gdr_server/${gdr_file}
echo ""
echo ""
done
docker load < gdr-hard-decoder-server_v1.0.1-release.tar.gz
docker load < ts2mp4_docker_image_v1.tar.gz
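# Optional: confirm the images were loaded (a sketch; the exact repository names depend on what is inside the tarballs)
# docker images | grep -Ei 'gdr|ts2mp4'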

View File

@@ -0,0 +1,26 @@
server {
    listen 8181;
    server_name localhost;

    # Domains allowed to make cross-origin requests; * means all
    add_header 'Access-Control-Allow-Origin' *;
    # Allow requests to carry cookies
    add_header 'Access-Control-Allow-Credentials' 'true';
    # Allowed request methods, e.g. GET/POST/PUT/DELETE
    add_header 'Access-Control-Allow-Methods' *;
    # Allowed request headers
    add_header 'Access-Control-Allow-Headers' *;

    location /map {
        alias /root/offline_map;
        add_header Access-Control-Allow-Origin *;
        add_header Access-Control-Allow-Methods 'GET,POST';
        add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
    }

    location /mapdzdt {
        alias /root/offline_map;
        add_header Access-Control-Allow-Origin *;
        add_header Access-Control-Allow-Methods 'GET,POST';
        add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
    }
}
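# Quick CORS check (a sketch; adjust the host/port to wherever this server block is deployed):
#   curl -I -H 'Origin: http://example.com' http://127.0.0.1:8181/map/
# The response headers should include "Access-Control-Allow-Origin: *".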

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,43 @@
server {
listen 8888;
server_name localhost;
location / {
proxy_pass http://localhost:30500;
client_max_body_size 5120m;
client_body_buffer_size 5120m;
client_body_timeout 6000s;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
proxy_connect_timeout 600s;
proxy_max_temp_file_size 5120m;
proxy_request_buffering on;
proxy_buffering off;
proxy_buffer_size 4k;
proxy_buffers 4 12k;
proxy_set_header Host fake-domain.zhbf.io;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
location /_AMapService/v4/map/styles {
set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
proxy_pass https://webapi.amap.com/v4/ap/styles;
}
location /_AMapService/ {
set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
proxy_pass https://restapi.amap.com/;
}
location /rtc/v1/ {
add_header Access-Control-Allow-Headers X-Requested-With;
add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
proxy_pass http://127.0.0.1:30985/rtc/v1/;
}
location ~ ^/\w*/actuator/ {
return 403;
}
}
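# Quick check (illustrative only; 30500 is the NodePort this proxy forwards to, assumed reachable on the local node):
#   curl -sI http://127.0.0.1:8888/ | head -n 5
#   curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:8888/gateway/actuator/health   # expect 403 from the actuator block above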

View File

@@ -0,0 +1,19 @@
apiVersion: v2
name: outside-deploy
description: Standardized charts for external delivery of the China Mobile Lingyun platform. Provides one-click deployment of frontend and backend business applications and middleware such as mysql, redis and minio
deprecated: true
type: application
version: 1.1.0
appVersion: 2.2.2
kubeVersion: ^1.16.0-0
maintainers:
- name: super wdd
email: wangziwen@cmii.chinamobile.com

View File

@@ -0,0 +1,24 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
ci/*

View File

@@ -0,0 +1,30 @@
apiVersion: v2
name: all-ingress-config
description: All Ingress configuration for the China Mobile Lingyun platform, covering frontend, backend, Api/Gateway, etc.
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 2.2.2
keywords:
- uavcloud
- ingress
- template
- function
- chinamobile

View File

@@ -0,0 +1,33 @@
{{- if or .Values.frontendApplication.enabled .Values.backendApplication.enabled .Values.apiGatewayApplication.enabled }}
{{- if .Values.frontendApplication.enabled }}
-- 已经开启前端应用的Ingress
-- The Ingress of the front-end application has been opened!
{{- end }}
{{- if .Values.backendApplication.enabled }}
-- 已经开启后端应用的Ingress
-- The Ingress of the back-end application has been opened!
{{- end }}
{{- if .Values.apiGatewayApplication.enabled }}
-- 已经开启Api和Gateway应用的Ingress
-- Ingress of Api and Gateway applications has been opened!
{{- end }}
{{- else }}
[ERROR] 您并没有选择开启任何应用的Ingress仅仅更新ConfigMap
[ERROR] You did not choose to open Ingress for any app! Just update the ConfigMap! !
{{- end }}
{{ if .Values.global.domain.IsPrivateDeployment }}
-- 您选择了【私有化】域名部署!!
-- [INFO] 请注意域名中不包含前缀!
-- 当前中移凌云的主页访问地址为:{{- if .Values.global.ingress.tls_enabled -}}https://{{- else -}}http://{{- end -}}{{ .Values.global.domain.DeployDomainName }}/
-- You have chosen [Private] domain name deployment! !
-- [INFO] Please note that the domain name does not contain a prefix!
-- The current homepage access address of China Mobile Lingyun is{{- if .Values.global.ingress.tls_enabled -}}https://{{- else -}}http://{{- end -}}{{ .Values.global.domain.DeployDomainName }}/
{{- else }}
-- 当前部署的租户环境为:{{ .Values.global.domain.TenantEnvironment }}
-- 当前中移凌云的主页访问地址为:{{- if .Values.global.ingress.tls_enabled -}}https://{{- else -}}http://{{- end -}}{{ .Values.global.domain.DeployDomainName }}/{{ .Values.global.domain.TenantEnvironment }}/
-- The currently deployed tenant environment is: {{ .Values.global.domain.TenantEnvironment }}
-- The current homepage access address of China Mobile Lingyun is{{- if .Values.global.ingress.tls_enabled -}}https://{{- else -}}http://{{- end -}}{{ .Values.global.domain.DeployDomainName }}/{{ .Values.global.domain.TenantEnvironment }}/
{{- end }}
[SUCCESS] Ingress Deployment has been accomplished !!!

View File

@@ -0,0 +1,50 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Create chart name and version as used by the chart label.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "all-ingress.name" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 }}
{{- end }}
{{/*
Create the tls configuration for https to enable trafik-ingress
*/}}
{{- define "all-ingress-front.full.applications" -}}
- cmii-uav-platform
- cmii-uav-platform-ai-brain
- cmii-uav-platform-hyperspectral
- cmii-uav-platform-mws
- cmii-uav-platform-oms
- cmii-uav-platform-open
- cmii-uav-platform-splice
- cmii-uav-platform-splice-visual
{{- end }}
{{/*
common annotations of frontend applications
*/}}
{{- define "all-ingress.frontend.commom.annotations" -}}
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
{{- end }}
{{/*
Specific annotations created for api applications
*/}}
{{- define "all-ingress.all-apis.commom.annotations" -}}
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$2
{{- end }}
{{/*
the specific annotations for project-minio
*/}}
{{/*{{- define "uavcloud-ingress.apiGateway.project-minio.annotations" -}}*/}}
{{/*kubernetes.io/ingress.class: "nginx"*/}}
{{/*nginx.ingress.kubernetes.io/enable-cors: "true"*/}}
{{/*nginx.ingress.kubernetes.io/rewrite-target: /api/uav/minio/$2*/}}
{{/*{{- end }}*/}}

View File

@@ -0,0 +1,51 @@
{{- if .Values.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $TenantEnvironment := .Values.global.domain.TenantEnvironment -}}
{{- $DeployDomainName := .Values.global.domain.DeployDomainName -}}
{{- $IsPrivateDeployment := .Values.global.domain.IsPrivateDeployment -}}
{{- range $applicationName, $values := .Values.frontendApplication.manifest }}
---
kind: ConfigMap
apiVersion: v1
metadata:
{{- if eq $values.shortname "" }}
name: tenant-prefix-platform
{{- else }}
name: tenant-prefix-{{ $values.shortname }}
{{- end }}
namespace: {{ $namespace }}
data:
ingress-config.js: |-
// 从ConfigMap中注入
// injected from ConfigMap
var __GlobalIngressConfig = {
{{- if $IsPrivateDeployment }}
TenantEnvironment: "",
{{- else }}
TenantEnvironment: {{ $TenantEnvironment | quote -}},
{{- end }}
CloudHOST: {{ $DeployDomainName | quote -}},
{{- if not (contains "appli" $values.shortname ) }}
{{- /* short name does not contain "application"; handle each case */}}
{{- if contains "cms-portal" $values.shortname }}
ApplicationShortName: "cmsportal",
{{- else if contains "-rescue" $values.shortname }}
ApplicationShortName: {{ trimSuffix "-rescue" $values.shortname | quote -}},
{{- else if contains "screen" $values.shortname }}
ApplicationShortName: {{ trimSuffix "-screen" $values.shortname | quote -}},
{{- else }}
ApplicationShortName: {{ $values.shortname | quote }},
{{- end }}
{{- else }}
ApplicationShortName: {{ trimSuffix "-application" $values.shortname | quote -}},
{{- end }}
{{- range $appShortNamePlusTenantEnv, $appClientId := $values}}
{{- $realApplicationNamePlusTenantEnv := cat $values.shortname $TenantEnvironment | replace " " "-" }}
{{- if hasPrefix $appShortNamePlusTenantEnv $realApplicationNamePlusTenantEnv }}
AppClientId: {{ $appClientId | quote }}
{{- end }}
{{- end }}
}
---
{{- end }}
{{- end }}
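{{- /*
Illustrative rendered output (not part of the chart). For shortname "ai-brain" with
TenantEnvironment "demo" and an assumed DeployDomainName "uav.example.com", the ConfigMap
"tenant-prefix-ai-brain" would carry an ingress-config.js similar to:
  var __GlobalIngressConfig = {
    TenantEnvironment: "demo",
    CloudHOST: "uav.example.com",
    ApplicationShortName: "ai-brain",
    AppClientId: "APP_0BF17ayVaSpY89O4"
  }
*/}}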

View File

@@ -0,0 +1,215 @@
{{- if and .Values.enabled .Values.apiGatewayApplication.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $TenantEnvironment := .Values.global.domain.TenantEnvironment -}}
{{- $DeployDomainName := ternary (first (regexSplit ":" .Values.global.domain.DeployDomainName -1)) ( .Values.global.domain.DeployDomainName ) (contains ":" .Values.global.domain.DeployDomainName) -}}
{{- $IsPrivateDeployment := .Values.global.domain.IsPrivateDeployment -}}
{{- $IsTlsEnables := .Values.global.ingress.tls_enabled -}}
{{- $scope := $ -}}
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: {{ $namespace }}
labels:
type: {{ $.Values.apiGatewayApplication.type }}
accessmode: {{ $.Values.apiGatewayApplication.accessmode }}
helm.sh/chart: {{ include "all-ingress.name" $scope }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
{{- if $.Values.global.image.tag }}
app.kubernetes.io/version: {{ $.Values.global.image.tag | quote }}
{{- end }}
annotations:
{{- include "all-ingress.frontend.commom.annotations" $scope | nindent 4 }}
{{- if $IsTlsEnables }}
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/permanent-redirect-code: '301'
{{- end }}
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
spec:
rules:
- host: {{ $DeployDomainName }}
http:
paths:
{{- if $IsPrivateDeployment }}
{{- range $key,$value := .Values.apiGatewayApplication.manifest }}
{{- if eq $value false }}
{{- $applicationName := ternary (trimPrefix "cmii-uav-" $key) (trimPrefix "cmii-" $key) (contains "cmii-uav" $key) -}}
{{- if eq $applicationName "material-warehouse" }}
- path: /api/warehouses/?(.*)
{{- else if eq $applicationName "gateway" }}
- path: /api/?(.*)
{{- else if eq $applicationName "admin-gateway" }}
- path: /oms/api/?(.*)
{{- /* {{- else if eq $applicationName "project-minio" }}*/}}
{{- /* - path: /?(.*)/api/minios/?(.*)*/}}
{{- else if eq $applicationName "open-gateway" }}
- path: /open/api/?(.*)
{{- else }}
- path: /{{ $applicationName }}/?(.*)
{{- end }}
pathType: ImplementationSpecific
backend:
serviceName: {{ $key }}
servicePort: 8080
{{- end }}
{{- end }}
{{- else }}
{{- range $key,$value := .Values.apiGatewayApplication.manifest }}
{{- if eq $value false }}
{{- $applicationName := ternary (trimPrefix "cmii-uav-" $key) (trimPrefix "cmii-" $key) (contains "cmii-uav" $key) -}}
{{- if eq $applicationName "material-warehouse" }}
- path: /{{ $TenantEnvironment }}/api/warehouses/?(.*)
{{- else if eq $applicationName "gateway" }}
- path: /{{ $TenantEnvironment }}/api/?(.*)
{{- else if eq $applicationName "admin-gateway" }}
- path: /{{ $TenantEnvironment }}/oms/api/?(.*)
{{- /* {{- else if eq $applicationName "project-minio" }}*/}}
{{- /* - path: /{{ $TenantEnvironment }}/?(.*)/api/minios/?(.*)*/}}
{{- else if eq $applicationName "open-gateway" }}
- path: /{{ $TenantEnvironment }}/open/api/?(.*)
{{- else }}
- path: /{{ $applicationName }}/?(.*)
{{- end }}
pathType: ImplementationSpecific
backend:
serviceName: {{ $key }}
servicePort: 8080
{{- end }}
{{- end }}
{{- end }}
{{- if $IsTlsEnables }}
tls:
- hosts:
    - {{ $DeployDomainName }}
  secretName: {{ $DeployDomainName | quote }}
{{- end }}
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-apis-ingress
namespace: {{ $namespace }}
labels:
type: {{ $.Values.apiGatewayApplication.type }}
accessmode: {{ $.Values.apiGatewayApplication.accessmode }}
helm.sh/chart: {{ include "all-ingress.name" $scope }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
{{- if $.Values.global.image.tag }}
app.kubernetes.io/version: {{ $.Values.global.image.tag | quote }}
{{- end }}
annotations:
{{- include "all-ingress.all-apis.commom.annotations" $scope | nindent 4 }}
{{- if $IsTlsEnables }}
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/permanent-redirect-code: '301'
{{- end }}
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
spec:
rules:
- host: {{ $DeployDomainName }}
http:
paths:
{{- if $IsPrivateDeployment }}
{{- range $key,$value := .Values.apiGatewayApplication.manifest }}
{{- $applicationName := ternary (trimPrefix "cmii-uav-" $key) (trimPrefix "cmii-" $key) (contains "cmii-uav" $key) -}}
{{- if $value }}
{{- if eq $applicationName "material-warehouse" }}
- path: /?(.*)/api/warehouses/?(.*)
{{- else if eq $applicationName "gateway" }}
- path: /api/?(.*)
{{- else if eq $applicationName "admin-gateway" }}
- path: /oms/api/?(.*)
{{- /* {{- else if eq $applicationName "project-minio" }}*/}}
{{- /* - path: /?(.*)/api/minios/?(.*)*/}}
{{- else if eq $applicationName "open-gateway" }}
- path: /open/api/?(.*)
{{- else }}
- path: /{{ $applicationName }}/?(.*)
{{- end }}
pathType: ImplementationSpecific
backend:
serviceName: {{ $key }}
servicePort: 8080
{{- end }}
{{- end }}
{{- else }}
{{- range $key,$value := .Values.apiGatewayApplication.manifest }}
{{- if $value }}
{{- $applicationName := ternary (trimPrefix "cmii-uav-" $key) (trimPrefix "cmii-" $key) (contains "cmii-uav" $key) -}}
{{- if eq $applicationName "material-warehouse" }}
- path: /{{ $TenantEnvironment }}/?(.*)/api/warehouses/?(.*)
{{- else if eq $applicationName "gateway" }}
- path: /{{ $TenantEnvironment }}/api/?(.*)
{{- else if eq $applicationName "admin-gateway" }}
- path: /{{ $TenantEnvironment }}/oms/api/?(.*)
{{- /* {{- else if eq $applicationName "project-minio" }}*/}}
{{- /* - path: /{{ $TenantEnvironment }}/?(.*)/api/minios/?(.*)*/}}
{{- else if eq $applicationName "open-gateway" }}
- path: /{{ $TenantEnvironment }}/open/api/?(.*)
{{- else }}
- path: /{{ $applicationName }}/?(.*)
{{- end }}
pathType: ImplementationSpecific
backend:
serviceName: {{ $key }}
servicePort: 8080
{{- end }}
{{- end }}
{{- end }}
{{- if $IsTlsEnables }}
tls:
- hosts:
    - {{ $DeployDomainName }}
  secretName: {{ $DeployDomainName | quote }}
{{- end }}
---
{{- end }}
{{/*apiVersion: networking.k8s.io/v1beta1*/}}
{{/*kind: Ingress*/}}
{{/*metadata:*/}}
{{/* name: project-minio-ingress*/}}
{{/* namespace: {{ $namespace }}*/}}
{{/* labels:*/}}
{{/* type: {{ $.Values.apiGatewayApplication.type }}*/}}
{{/* accessmode: {{ $.Values.apiGatewayApplication.accessmode }}*/}}
{{/* helm.sh/chart: {{ include "uavcloud-ingress.name" $scope }}*/}}
{{/* app.kubernetes.io/managed-by: {{ $.Release.Service }}*/}}
{{/* {{- if $.Values.global.image.tag }}*/}}
{{/* app.kubernetes.io/version: {{ $.Values.global.image.tag | quote }}*/}}
{{/* {{- end }}*/}}
{{/* annotations:*/}}
{{/* {{- include "uavcloud-ingress.apiGateway.project-minio.annotations" $scope | nindent 4 }}*/}}
{{/* {{- if $IsTlsEnables }}*/}}
{{/* nginx.ingress.kubernetes.io/ssl-redirect: "true"*/}}
{{/* nginx.ingress.kubernetes.io/permanent-redirect-code: '301'*/}}
{{/* {{- end }}*/}}
{{/* nginx.ingress.kubernetes.io/configuration-snippet: |*/}}
{{/* proxy_set_header Upgrade $http_upgrade;*/}}
{{/* proxy_set_header Connection "Upgrade";*/}}
{{/*spec:*/}}
{{/* rules:*/}}
{{/* - host: {{ $DeployDomainName }}*/}}
{{/* http:*/}}
{{/* paths:*/}}
{{/* {{- if $IsPrivateDeployment }}*/}}
{{/* - path: /?(.*)/api/minio/?(.*)*/}}
{{/* {{- else }}*/}}
{{/* - path: /{{ $TenantEnvironment }}/?(.*)/api/minio/?(.*)*/}}
{{/* {{- end }}*/}}
{{/* pathType: ImplementationSpecific*/}}
{{/* backend:*/}}
{{/* serviceName: cmii-project-minio*/}}
{{/* servicePort: 8080*/}}
{{/* {{- if $IsTlsEnables }}*/}}
{{/* tls:*/}}
{{/* - hosts:*/}}
{{/* - {{ $DeployDomainName }}*/}}
{{/* - secretName: {{ $DeployDomainName | quote }}*/}}
{{/* {{- end }}*/}}
{{/*---*/}}

View File

@@ -0,0 +1,35 @@
{{- if and .Values.enabled .Values.backendApplication.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $TenantEnvironment := .Values.global.domain.TenantEnvironment -}}
{{- $scope := $ -}}
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: {{ $namespace }}
labels:
type: {{ .Values.backendApplication.type }}
accessmode: {{ $.Values.backendApplication.accessmode }}
helm.sh/chart: {{ include "all-ingress.name" $scope }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
{{- if $.Values.global.image.tag }}
app.kubernetes.io/version: {{ $.Values.global.image.tag | quote }}
{{- end }}
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
{{- range $key,$value := .Values.backendApplication.manifest }}
{{- $applicationName := $key | trunc 63 }}
- host: {{ $applicationName }}.uavcloud-{{ $TenantEnvironment }}.io
http:
paths:
- path: /
backend:
serviceName: {{ $applicationName }}
servicePort: 8080
{{- end }}
---
{{- end }}
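{{- /*
Illustrative check (not part of the chart). Each backend gets an internal host such as
cmii-uav-oauth.uavcloud-<tenant>.io; from a machine that resolves such a host to the ingress
controller (the node IP and the "demo" tenant below are placeholders):
  curl -s -o /dev/null -w '%{http_code}\n' -H "Host: cmii-uav-oauth.uavcloud-demo.io" http://<ingress-node-ip>/
*/}}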

View File

@@ -0,0 +1,127 @@
{{- if and .Values.enabled .Values.frontendApplication.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $TenantEnvironment := .Values.global.domain.TenantEnvironment -}}
{{- $DeployDomainName := ternary (first (regexSplit ":" .Values.global.domain.DeployDomainName -1)) ( .Values.global.domain.DeployDomainName ) (contains ":" .Values.global.domain.DeployDomainName) -}}
{{- $IsPrivateDeployment := .Values.global.domain.IsPrivateDeployment -}}
{{- $scope := $ -}}
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: {{ $namespace }}
labels:
type: {{ .Values.frontendApplication.type }}
accessmode: {{ $.Values.frontendApplication.accessmode }}
helm.sh/chart: {{ include "all-ingress.name" $scope }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
{{- if $.Values.global.image.tag }}
app.kubernetes.io/version: {{ $.Values.global.image.tag | quote }}
{{- end }}
annotations:
{{- include "all-ingress.frontend.commom.annotations" $scope | nindent 4 }}
{{- if .Values.global.ingress.tls_enabled }}
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/permanent-redirect-code: '301'
{{- end }}
nginx.ingress.kubernetes.io/configuration-snippet: |
{{- range $applicationName, $values := .Values.frontendApplication.manifest }}
{{- if $IsPrivateDeployment }}
{{- if eq $values.shortname "" }}
{{- /* Main domain case, domain rework */}}
rewrite ^(/green)$ $1/ redirect;
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/inspection)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/park)$ $1/ redirect;
{{- /* Main domain case, domain rework: end */}}
{{- else if not (contains "appli" $values.shortname) }}
{{- /* Special short-name cases */}}
{{- /* short name does not contain "application"; handle each case */}}
{{- if contains "-portal" $values.shortname}}
rewrite ^(/cmsportal)$ $1/ redirect;
{{- else if contains "-rescue" $values.shortname }}
rewrite ^(/{{ trimSuffix "-rescue" $values.shortname }})$ $1/ redirect;
{{- else if contains "screen" $values.shortname }}
rewrite ^(/{{ trimSuffix "-screen" $values.shortname }})$ $1/ redirect;
{{- else }}
{{- /* Short names without a special rule all end up here */}}
rewrite ^(/{{ $values.shortname }})$ $1/ redirect;
{{- end }}
{{- else if (contains "appli" $values.shortname) }}
rewrite ^(/{{ trimSuffix "-application" $values.shortname }})$ $1/ redirect;
{{- else }}
{{- /* Fallback, just in case */}}
rewrite ^(/{{ $values.shortname }})$ $1/ redirect;
{{- end }}
{{- else }}
{{- if eq $values.shortname "" }}
rewrite ^(/{{- $TenantEnvironment -}})$ $1/ redirect;
{{- else }}
rewrite ^(/{{- cat $TenantEnvironment $values.shortname | replace " " "/" -}})$ $1/ redirect;
{{- end }}
{{- end }}
{{- end }}
spec:
rules:
- host: {{ $DeployDomainName }}
http:
paths:
{{- /* Domain rework, added for demo */}}
- path: /inspection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /green/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /park/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
{{- /* Domain rework: end */}}
{{- range $applicationName, $values := .Values.frontendApplication.manifest }}
{{- if $IsPrivateDeployment }}
{{- if eq $values.shortname ""}}
- path: /?(.*)
{{- else if (contains "appli" $values.shortname) }}
- path: /{{ trimSuffix "-application" $values.shortname }}/?(.*)
{{- else }}
- path: /{{ $values.shortname }}/?(.*)
{{- end }}
{{- else }}
{{- if eq $values.shortname ""}}
- path: /{{ $TenantEnvironment }}/?(.*)
{{- else if not (contains "appli" $values.shortname) }}
- path: /{{ $TenantEnvironment }}/{{ trimSuffix "-application" $values.shortname }}/?(.*)
{{- else }}
- path: /{{- cat $TenantEnvironment $values.shortname | replace " " "/" -}}/?(.*)
{{- end }}
{{- end }}
pathType: ImplementationSpecific
backend:
serviceName: {{ $applicationName }}
servicePort: 9528
{{- end }}
{{- if .Values.global.ingress.tls_enabled }}
tls:
- hosts:
    - {{ $DeployDomainName }}
  secretName: {{ $DeployDomainName | quote }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,150 @@
enabled: true
frontendApplication:
enabled: true
type: frontend
accessmode: public
manifest:
# AppFullName vs AppShortName for frontend applications
cmii-uav-platform:
shortname: ""
cmii-uav-platform-ai-brain:
shortname: "ai-brain"
ai-brain-dev: APP_i6mlNKODBo42OIVn
ai-brain-test: APP_0BF17ayVaSpY89O4
ai-brain-feature: APP_0BF17ayVaSpY89O4
ai-brain-devbase: APP_0BF17ayVaSpY89O4
ai-brain-uat: APP_kZeiSXxg9qZxj6ue
ai-brain-demo: APP_0BF17ayVaSpY89O4
ai-brain-outside: APP_0BF17ayVaSpY89O4 # this is actually outside works, copy from demo
cmii-uav-platform-hyperspectral:
shortname: "hyper"
hyper-dev: APP_CN713PC4qwViGj1d
hyper-test: APP_xtN9XF2L1J4IRHaB
hyper-devbase: APP_xtN9XF2L1J4IRHaB
hyper-feature: APP_xtN9XF2L1J4IRHaB
hyper-uat: APP_OT4l1kYrzWT4tiif
hyper-demo: APP_xtN9XF2L1J4IRHaB
hyper-outside: APP_xtN9XF2L1J4IRHaB # this is actually outside works, copy from demo
cmii-uav-platform-mws:
shortname: "mws"
mws-dev: APP_4lVSVI0ZGxTssir8
mws-test: APP_MEeBJHp1fSVD1Wuw
mws-devbase: APP_MEeBJHp1fSVD1Wuw
mws-feature: APP_MEeBJHp1fSVD1Wuw
mws-uat: APP_U4GEiHutGQL5prSP
mws-demo: APP_MEeBJHp1fSVD1Wuw
mws-outside: APP_MEeBJHp1fSVD1Wuw # this is actually outside works, copy from demo
cmii-uav-platform-mws-admin:
shortname: "mws-admin"
cmii-uav-platform-oms:
shortname: "oms"
cmii-uav-platform-cms:
shortname: "cms"
cmii-uav-platform-cms-portal:
shortname: "cmsportal"
cmii-uav-platform-open:
shortname: "open"
cmii-uav-platform-splice:
shortname: "splice"
splice-dev: APP_bYdlPsBBIncZdaYR
splice-test: APP_l4HIMixfIXhlCTi9
splice-devbase: APP_l4HIMixfIXhlCTi9
splice-feature: APP_l4HIMixfIXhlCTi9
splice-uat: APP
splice-demo: APP_l4HIMixfIXhlCTi9
splice-outside: APP_l4HIMixfIXhlCTi9 # this is actually outside works, copy from demo
cmii-uav-platform-splice-visual:
shortname: "splice-visual"
cmii-uav-platform-detection:
shortname: "detection"
detectiondemo: APP_3RmcJaecdbsvQwZn
detection-outside: APP_3RmcJaecdbsvQwZn # this is actually outside works, copy from demo
cmii-uav-platform-security:
shortname: "security"
security-demo: APP_JUSEMc7afyWXxvE7
security-outside: APP_JUSEMc7afyWXxvE7 # this is actually outside works, copy from demo
cmii-uav-platform-visualization:
shortname: "visualization"
visualization-demo:
visualization-outside: # this is actually outside works, copy from demo
cmii-uav-platform-logistics: # medical logistics
shortname: "logistics"
logistics-demo: APP_PvdfRRRBPL8xbIwl
logistics-outside: APP_PvdfRRRBPL8xbIwl
cmii-uav-platform-share: # sharing
shortname: "share"
share-demo: APP_4lVSVI0ZGxTssir8
share-outside: APP_4lVSVI0ZGxTssir8
cmii-uav-platform-base: # basic edition
shortname: "base"
base-demo: APP_9LY41OaKSqk2btY0
base-outside: APP_9LY41OaKSqk2btY0 # this is actually outside works, copy from demo
cmii-uav-platform-traffic-screen: # traffic management
shortname: "traffic"
traffic-demo: APP_jf04PerIBiDeRO6l
traffic-outside: APP_jf04PerIBiDeRO6l
cmii-uav-platform-emergency-rescue: # emergency support
shortname: "emergency"
emergency-demo: APP_aGsTAY1uMZrpKdfk
emergency-outside: APP_aGsTAY1uMZrpKdfk
cmii-suav-platform-airspace: # government supervision
shortname: "supervision"
emergency-demo: APP_qqSu82THfexI8PLM
emergency-outside: APP_qqSu82THfexI8PLM
backendApplication:
enabled: true
type: backend
# this ingress is for swagger and CI/CD URLs; they can only be accessed from the internal network
accessmode: internal
manifest:
# all backend applications
cmii-admin-data: false
cmii-admin-user: false
cmii-uav-airspace: false
cmii-uav-brain: false
cmii-uav-clusters: false
cmii-uav-data-post-process: false
cmii-uav-developer: false
cmii-uav-device: false
cmii-uav-kpi-monitor: false
cmii-uav-live: false
cmii-uav-logger: false
cmii-uav-mission: false
cmii-uav-monitor: false
cmii-uav-mqtthandler: false
cmii-uav-notice: false
cmii-uav-oauth: false
cmii-uav-process: false
cmii-uav-security-system: false
cmii-uav-surveillance: false
cmii-uav-user: false
cmii-uav-waypoint: false
cmii-uav-cms: false
cmii-uav-industrial-portfolio: false
cmii-project-minio: false
cmii-uav-material-warehouse: false
cmii-uav-gateway: false
cmii-open-gateway: false
cmii-admin-gateway: false
apiGatewayApplication:
enabled: true
type: "api-gateway"
# this ingress is for apis and gateways
accessmode: public
manifest:
# all applications need to expose api/gateway to public network
# cmii-project-minio: false # deprecated
cmii-uav-material-warehouse: true
cmii-uav-gateway: false
cmii-open-gateway: false
cmii-admin-gateway: false

View File

@@ -0,0 +1,24 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
ci/*

View File

@@ -0,0 +1,31 @@
apiVersion: v2
name: all-middleware
description: Includes all middleware for the uavcloud platform, such as mysql, redis, emqx, mongo, rabbitmq and nacos
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 2.2.2
keywords:
- uavcloud
- middleware
- template
- function
- chinamobile

View File

@@ -0,0 +1,26 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
Jenkinsfile
chart_template.yaml
emqx.conf

View File

@@ -0,0 +1,24 @@
apiVersion: v2
name: emqx
description: emqx middleware; can be deployed in clusterMode or standaloneMode and depends on the PVCs created by helm-emqxs
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 2.2.0

File diff suppressed because it is too large

View File

@@ -0,0 +1,33 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Kubernetes standard labels
*/}}
{{- define "uavcloud-middleware.emqx.labels.standard" -}}
cmii.type: {{ .Values.global.application.type }}
{{- if .Values.enabled.clusterMode}}
cmii.app: {{ .Values.appName.clusterMode }}
cmii.emqx.architecture: cluster
{{- else }}
cmii.app: {{ .Values.appName.standaloneMode }}
cmii.emqx.architecture: standalone
{{- end }}
helm.sh/chart: {{ include "uavcloud-middleware.chart" . }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
{{- if .Values.global.image.tag }}
app.kubernetes.io/version: {{ .Values.global.image.tag | quote }}
{{- end }}
{{- end -}}
{{/*
Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
*/}}
{{- define "uavcloud-middleware.emqx.labels.matchLabels" -}}
cmii.type: {{ .Values.global.application.type }}
{{- if .Values.enabled.clusterMode}}
cmii.app: {{ .Values.appName.clusterMode }}
cmii.emqx.architecture: cluster
{{- else }}
cmii.app: {{ .Values.appName.standaloneMode }}
cmii.emqx.architecture: standalone
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,24 @@
{{- define "uavcloud-middleware.emqx.cluster.config.acl" -}}
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
{{- end -}}
{{- define "uavcloud-middleware.emqx.cluster.config.emqx_auth_username" -}}
auth.user.1.username = {{ .Values.auth.username }}
auth.user.1.password = {{ .Values.auth.password }}
auth.user.password_hash = sha256
{{- end -}}
{{- define "uavcloud-middleware.emqx.cluster.config.loaded_plugins" -}}
{emqx_auth_username,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
{{- end -}}

View File

@@ -0,0 +1,41 @@
{{- if .Values.enabled.clusterMode }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.clusterMode -}}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ $applicationName }}-env
namespace: {{ $namespace }}
labels:
{{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
data:
EMQX_CLUSTER__K8S__ADDRESS_TYPE: hostname
EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc:443
EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local
EMQX_NAME: {{ $applicationName }}
EMQX_CLUSTER__K8S__APP_NAME: {{ $applicationName }}
EMQX_CLUSTER__DISCOVERY: k8s
EMQX_CLUSTER__K8S__SERVICE_NAME: {{ $applicationName }}-headless
EMQX_CLUSTER__K8S__NAMESPACE: {{ $namespace }}
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ $applicationName }}-cm
namespace: {{ $namespace }}
labels:
{{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
data:
emqx_auth_username.conf: |-
{{- include "uavcloud-middleware.emqx.cluster.config.emqx_auth_username" . | nindent 4 }}
acl.conf: |-
{{- include "uavcloud-middleware.emqx.cluster.config.acl" . | nindent 4 }}
loaded_plugins: |-
{{- include "uavcloud-middleware.emqx.cluster.config.loaded_plugins" . | nindent 4 }}
---
{{- end }}

View File

@@ -0,0 +1,30 @@
{{- if and .Values.enabled.clusterMode .Values.ingress.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.clusterMode -}}
{{- $DeployDomainName := ternary (first (regexSplit ":" .Values.global.domain.DeployDomainName -1)) ( .Values.global.domain.DeployDomainName ) (contains ":" .Values.global.domain.DeployDomainName) -}}
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: {{ $applicationName }}
namespace: {{ $namespace }}
labels:
{{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
rules:
- host: "emqx.{{ $DeployDomainName }}"
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: {{ $applicationName }}
servicePort: {{ .Values.containerPort.dashboard }}
{{- if .Values.global.ingress.tls_enabled }}
tls:
- hosts:
- "emqx.{{ $DeployDomainName }}"
secretName: "x.{{ $DeployDomainName }}-tls"
{{- end }}
---
{{- end }}

View File

@@ -0,0 +1,22 @@
{{- if and .Values.enabled.clusterMode .Values.enabled.standaloneMode }}
{{/* Do not use this; use the centrally generated PVC instead */}}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.clusterMode -}}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ $applicationName }}
namespace: {{ $namespace }}
labels:
{{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
storageClassName: {{ .Values.global.storageClass.name }}
accessModes:
- {{ .Values.storageClass.accessMode }}
volumeMode: {{ .Values.storageClass.volumeMode }}
resources:
requests:
storage: {{ .Values.storageClass.resources.requests.storage }}
---
{{- end }}

View File

@@ -0,0 +1,40 @@
{{- if .Values.enabled.clusterMode }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.clusterMode -}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ $applicationName }}
namespace: {{ $namespace }}
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ $applicationName }}
namespace: {{ $namespace }}
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ $applicationName }}
namespace: {{ $namespace }}
subjects:
- kind: ServiceAccount
name: {{ $applicationName }}
namespace: {{ $namespace }}
roleRef:
kind: Role
name: {{ $applicationName }}
apiGroup: rbac.authorization.k8s.io
---
{{- end }}

View File

@@ -0,0 +1,94 @@
{{- if .Values.enabled.clusterMode }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.clusterMode -}}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ $applicationName }}
namespace: {{ $namespace }}
labels:
{{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
replicas: {{ .Values.replicas.clusterMode }}
serviceName: {{ $applicationName }}-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
{{- include "uavcloud-middleware.emqx.labels.matchLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 8 }}
spec:
{{- if .Values.global.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.global.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
{{- /* podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAffinityPreset "context" $) | nindent 10 -}}*/}}
{{- /* podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAntiAffinityPreset "context" $) | nindent 10 }}*/}}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.global.nodeAffinityPreset.type "key" .Values.global.nodeAffinityPreset.key "values" .Values.global.nodeAffinityPreset.values) | nindent 10 -}}
{{- end }}
serviceAccountName: {{ $applicationName }}
containers:
- name: {{ $applicationName }}
image: "{{ .Values.global.image.repository | default .Values.image.repository }}/emqx:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.global.image.pullPolicy }}
ports:
- name: mqtt
containerPort: {{ .Values.containerPort.mqtt }}
- name: mqttssl
containerPort: {{ .Values.containerPort.mqttssl }}
- name: mgmt
containerPort: {{ .Values.containerPort.mgmt }}
- name: ws
containerPort: {{ .Values.containerPort.websocket }}
- name: wss
containerPort: {{ .Values.containerPort.wss }}
- name: dashboard
containerPort: {{ .Values.containerPort.dashboard }}
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: {{ $applicationName }}-env
resources:
{{- toYaml .Values.resources | nindent 12 }}
readinessProbe:
httpGet:
path: /status
port: {{ .Values.containerPort.mgmt | default 8081 }}
initialDelaySeconds: 5
periodSeconds: 5
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: {{ $applicationName }}-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf"
subPath: emqx_auth_username.conf
readOnly: false
- name: {{ $applicationName }}-cm
mountPath: "/opt/emqx/etc/acl.conf"
subPath: "acl.conf"
readOnly: false
- name: {{ $applicationName }}-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: {{ $applicationName }}-cm
configMap:
name: {{ $applicationName }}-cm
items:
- key: emqx_auth_username.conf
path: emqx_auth_username.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
{{- end }}
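{{- /*
Illustrative post-deploy check (not part of the chart); <ns> is the release namespace:
  kubectl -n <ns> get pods -l cmii.app=helm-emqxs
  kubectl -n <ns> exec helm-emqxs-0 -- emqx_ctl cluster status
emqx_ctl is the CLI shipped with the EMQX 4.x image pinned in values.yaml (tag 4.2.12).
*/}}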

View File

@@ -0,0 +1,36 @@
{{- if .Values.enabled.clusterMode }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.clusterMode -}}
---
apiVersion: v1
kind: Service
metadata:
name: {{ $applicationName }}
namespace: {{ $namespace }}
labels:
{{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
type: NodePort
selector:
{{- include "uavcloud-middleware.emqx.labels.matchLabels" . | nindent 4 }}
ports:
- port: {{ .Values.containerPort.mqtt }}
name: mqtt
targetPort: {{ .Values.containerPort.mqtt }}
{{- if .Values.nodePort.enabled }}
nodePort: {{ .Values.nodePort.mqtt }}
{{- end }}
- port: {{ .Values.containerPort.dashboard }}
name: dashboard
targetPort: {{ .Values.containerPort.dashboard }}
{{- if .Values.nodePort.enabled }}
nodePort: {{ .Values.nodePort.dashboard }}
{{- end }}
- port: {{ .Values.containerPort.websocket }}
name: mqtt-websocket
targetPort: {{ .Values.containerPort.websocket }}
{{- if .Values.nodePort.enabled }}
nodePort: {{ .Values.nodePort.mqttWebSocket }}
{{- end }}
---
{{- end }}

View File

@@ -0,0 +1,47 @@
{{- if and .Values.enabled.clusterMode}}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.clusterMode -}}
---
apiVersion: v1
kind: Service
metadata:
name: {{ $applicationName }}-headless
namespace: {{ $namespace }}
labels:
{{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
type: ClusterIP
clusterIP: None
selector:
{{- include "uavcloud-middleware.emqx.labels.matchLabels" . | nindent 4 }}
ports:
- name: mqtt
port: {{ .Values.containerPort.mqtt }}
protocol: TCP
targetPort: {{ .Values.containerPort.mqtt }}
- name: mqttssl
port: {{ .Values.containerPort.mqttssl }}
protocol: TCP
targetPort: {{ .Values.containerPort.mqttssl }}
- name: mgmt
port: {{ .Values.containerPort.mgmt }}
protocol: TCP
targetPort: {{ .Values.containerPort.mgmt }}
- name: websocket
port: {{ .Values.containerPort.websocket }}
protocol: TCP
targetPort: {{ .Values.containerPort.websocket }}
- name: wss
port: {{ .Values.containerPort.wss }}
protocol: TCP
targetPort: {{ .Values.containerPort.wss }}
- name: dashboard
port: {{ .Values.containerPort.dashboard }}
protocol: TCP
targetPort: {{ .Values.containerPort.dashboard }}
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
---
{{- end }}

View File

@@ -0,0 +1,30 @@
{{- if and .Values.enabled.standaloneMode .Values.ingress.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.standaloneMode -}}
{{- $DeployDomainName := ternary (first (regexSplit ":" .Values.global.domain.DeployDomainName -1)) ( .Values.global.domain.DeployDomainName ) (contains ":" .Values.global.domain.DeployDomainName) -}}
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: {{ $applicationName }}
namespace: {{ $namespace }}
labels:
{{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
rules:
- host: "emqx.{{ $DeployDomainName }}"
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: {{ $applicationName }}
servicePort: {{ .Values.containerPort.dashboard }}
{{- if .Values.global.ingress.tls_enabled }}
tls:
- hosts:
- "emqx.{{ $DeployDomainName }}"
secretName: "x.{{ $DeployDomainName }}-tls"
{{- end }}
---
{{- end }}

View File

@@ -0,0 +1,82 @@
{{- if and .Values.enabled.standaloneMode (not .Values.enabled.clusterMode) }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.standaloneMode -}}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ $applicationName }}
namespace: {{ $namespace }}
labels:
{{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
serviceName: {{ $applicationName }}
replicas: {{ .Values.replicas.standaloneMode }}
selector:
matchLabels:
{{- include "uavcloud-middleware.emqx.labels.matchLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 8 }}
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
{{- if .Values.global.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.global.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
{{- /* podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAffinityPreset "context" $) | nindent 10 -}}*/}}
{{- /* podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAntiAffinityPreset "context" $) | nindent 10 }}*/}}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.global.nodeAffinityPreset.type "key" .Values.global.nodeAffinityPreset.key "values" .Values.global.nodeAffinityPreset.values) | nindent 10 -}}
{{- end }}
containers:
- name: {{ $applicationName }}
image: "{{ .Values.global.image.repository | default .Values.image.repository }}/emqx:{{ .Values.image.tag }}"
securityContext:
privileged: true
resources:
{{- toYaml .Values.resources | nindent 12 }}
ports:
- containerPort: {{ .Values.containerPort.mqtt }}
name: mqtt
protocol: TCP
- containerPort: {{ .Values.containerPort.dashboard }}
name: dashboard
protocol: TCP
- containerPort: {{ .Values.containerPort.websocket }}
name: mqtt-websocket
protocol: TCP
volumeMounts:
- name: {{ $applicationName }}-plugins
mountPath: /opt/emqx/data/loaded_plugins
subPath: loaded_plugins
- name: {{ $applicationName }}-auth
mountPath: /opt/emqx/etc/plugins/emqx_auth_username.conf
subPath: emqx_auth_username.conf
- name: emqx-data
mountPath: /opt/emqx/data/emqx_erl_pipes
readOnly: false
subPath: {{ $namespace }}/{{ $applicationName }}/data
- name: emqx-data
mountPath: /opt/emqx/log
readOnly: false
subPath: {{ $namespace }}/{{ $applicationName }}/log
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: {{ $applicationName }}-plugins
configMap:
name: {{ $applicationName }}-plugins
items:
- key: loaded_plugins
path: loaded_plugins
- name: {{ $applicationName }}-auth
configMap:
name: {{ $applicationName }}-auth
items:
- key: emqx_auth_username.conf
path: emqx_auth_username.conf
---
{{- end }}

View File

@@ -0,0 +1,36 @@
{{- if and .Values.enabled.standaloneMode (not .Values.enabled.clusterMode) }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.standaloneMode -}}
---
apiVersion: v1
kind: Service
metadata:
name: {{ $applicationName }}
namespace: {{ $namespace }}
labels:
{{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
type: {{ .Values.global.middlewareService.type }}
selector:
{{- include "uavcloud-middleware.emqx.labels.matchLabels" . | nindent 4 }}
ports:
- port: {{ .Values.containerPort.mqtt }}
name: mqtt
targetPort: {{ .Values.containerPort.mqtt }}
{{- if eq .Values.global.middlewareService.type "NodePort" }}
nodePort: {{ .Values.nodePort.mqtt }}
{{- end }}
- port: {{ .Values.containerPort.dashboard }}
name: dashboard
targetPort: {{ .Values.containerPort.dashboard }}
{{- if eq .Values.global.middlewareService.type "NodePort" }}
nodePort: {{ .Values.nodePort.dashboard }}
{{- end }}
- port: {{ .Values.containerPort.websocket }}
name: mqtt-websocket
targetPort: {{ .Values.containerPort.websocket }}
{{- if eq .Values.global.middlewareService.type "NodePort" }}
nodePort: {{ .Values.nodePort.mqttWebSocket }}
{{- end }}
---
{{- end }}

View File

@@ -0,0 +1,54 @@
enabled:
clusterMode: true
standaloneMode: false
auth:
username: cmii
password: odD8#Ve7.B
storageClass:
accessMode: "ReadWriteOnce"
volumeMode: Filesystem
resources:
requests:
storage: 16Gi
nodePort:
enabled: true
mqtt: 31883
dashboard: 48083
mqttWebSocket: 38083
ingress:
enabled: false
image:
repository: docker.io/emqx # normally unused; global.image.repository takes precedence
tag: 4.2.12
replicas:
clusterMode: 3
standaloneMode: 1
appName:
clusterMode: helm-emqxs
standaloneMode: helm-emqx
# please don't modify the values below !!!
containerPort:
mqtt: 1883
mgmt: 8081
websocket: 8083
wss: 8084
mqttssl: 8883
dashboard: 18083
resources:
limits:
memory: 2Gi
cpu: "1"
requests:
memory: 1Gi
cpu: 300m

View File

@@ -0,0 +1,25 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
Jenkinsfile
chart_template.yaml

View File

@@ -0,0 +1,24 @@
# Just template
apiVersion: v2
name: mongo
description: uavcloud middleware for mongo
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 2.2.2

View File

@@ -0,0 +1,21 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Kubernetes standard labels
*/}}
{{- define "uavcloud-middleware.mongo.labels.standard" -}}
cmii.app: {{ .Values.appName }}
cmii.type: {{ .Values.global.application.type }}
helm.sh/chart: {{ include "uavcloud-middleware.chart" . }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
{{- if $.Values.global.image.tag }}
app.kubernetes.io/version: {{ $.Values.global.image.tag | quote }}
{{- end }}
{{- end -}}
{{/*
Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
*/}}
{{- define "uavcloud-middleware.mongo.labels.matchLabels" -}}
cmii.app: {{ .Values.appName }}
cmii.type: {{ .Values.global.application.type }}
{{- end -}}

View File

@@ -0,0 +1,57 @@
{{- if .Values.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName -}}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ $applicationName }}
namespace: {{ $namespace }}
labels:
{{- include "uavcloud-middleware.mongo.labels.standard" . | nindent 4 }}
spec:
serviceName: {{ $applicationName }}
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "uavcloud-middleware.mongo.labels.matchLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "uavcloud-middleware.mongo.labels.standard" . | nindent 8 }}
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
{{- if .Values.global.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.global.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
{{- /* podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAffinityPreset "context" $) | nindent 10 -}}*/}}
{{- /* podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAntiAffinityPreset "context" $) | nindent 10 }}*/}}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.global.nodeAffinityPreset.type "key" .Values.global.nodeAffinityPreset.key "values" .Values.global.nodeAffinityPreset.values) | nindent 10 -}}
{{- end }}
containers:
- name: {{ $applicationName }}
image: "{{ .Values.global.image.repository | default .Values.image.repository }}/mongo:{{ .Values.image.tag }}"
resources:
{{- toYaml .Values.resources | nindent 12 }}
ports:
- containerPort: {{ .Values.containerPort.mongo }}
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: {{ .Values.auth.username }}
- name: MONGO_INITDB_ROOT_PASSWORD
value: {{ .Values.auth.password }}
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: {{ $namespace }}/{{ $applicationName }}/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
{{- end }}

View File

@@ -0,0 +1,24 @@
{{- if .Values.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName -}}
---
apiVersion: v1
kind: Service
metadata:
name: {{ $applicationName }}
namespace: {{ $namespace }}
labels:
{{- include "uavcloud-middleware.mongo.labels.standard" . | nindent 4 }}
spec:
type: {{ .Values.global.middlewareService.type }}
selector:
{{- include "uavcloud-middleware.mongo.labels.matchLabels" . | nindent 4 }}
ports:
- port: 27017
name: server-27017
targetPort: 27017
{{- if eq .Values.global.middlewareService.type "NodePort" }}
nodePort: {{ .Values.nodePort.mongo }}
{{- end }}
---
{{- end }}

View File

@@ -0,0 +1,35 @@
image:
repository: docker.io
tag: "5.0"
replicaCount: 1
enabled: true
appName: helm-mongo
auth:
username: cmlc
password: REdPza8#oVlt
nodePort:
mongo: 37017
containerPort:
mongo: 27017
resources:
limits:
memory: 4Gi
cpu: "2"
requests:
memory: 2Gi
cpu: "1"
storageClass:
accessMode: "ReadWriteMany"
volumeMode: Filesystem
resources:
requests:
storage: 16Gi

View File

@@ -0,0 +1,23 @@
apiVersion: v2
name: nacos
description: uavcloud middleware for nacos
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.1.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 2.2.2

View File

@@ -0,0 +1,253 @@
/*
* Copyright 1999-2018 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/******************************************/
/*   Database name = cmii_nacos_config   */
/*   Table name = config_info   */
/******************************************/
# CREATE SCHEMA cmii_nacos_config;
CREATE TABLE `config_info`
(
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(255) DEFAULT NULL,
`content` longtext NOT NULL COMMENT 'content',
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
`src_user` text COMMENT 'source user',
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
`app_name` varchar(128) DEFAULT NULL,
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
`c_desc` varchar(256) DEFAULT NULL,
`c_use` varchar(64) DEFAULT NULL,
`effect` varchar(64) DEFAULT NULL,
`type` varchar(64) DEFAULT NULL,
`c_schema` text,
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfo_datagrouptenant` (`data_id`, `group_id`, `tenant_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8
COLLATE = utf8_bin COMMENT ='config_info';
/******************************************/
/*   Database name = nacos_config   */
/*   Table name = config_info_aggr   */
/******************************************/
CREATE TABLE `config_info_aggr`
(
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(255) NOT NULL COMMENT 'group_id',
`datum_id` varchar(255) NOT NULL COMMENT 'datum_id',
`content` longtext NOT NULL COMMENT '内容',
`gmt_modified` datetime NOT NULL COMMENT '修改时间',
`app_name` varchar(128) DEFAULT NULL,
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfoaggr_datagrouptenantdatum` (`data_id`, `group_id`, `tenant_id`, `datum_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8
COLLATE = utf8_bin COMMENT ='增加租户字段';
/******************************************/
/*   Database name = nacos_config   */
/*   Table name = config_info_beta   */
/******************************************/
CREATE TABLE `config_info_beta`
(
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
`content` longtext NOT NULL COMMENT 'content',
`beta_ips` varchar(1024) DEFAULT NULL COMMENT 'betaIps',
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
`src_user` text COMMENT 'source user',
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfobeta_datagrouptenant` (`data_id`, `group_id`, `tenant_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8
COLLATE = utf8_bin COMMENT ='config_info_beta';
/******************************************/
/*   Database name = nacos_config   */
/*   Table name = config_info_tag   */
/******************************************/
CREATE TABLE `config_info_tag`
(
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
`tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
`tag_id` varchar(128) NOT NULL COMMENT 'tag_id',
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
`content` longtext NOT NULL COMMENT 'content',
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
`src_user` text COMMENT 'source user',
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfotag_datagrouptenanttag` (`data_id`, `group_id`, `tenant_id`, `tag_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8
COLLATE = utf8_bin COMMENT ='config_info_tag';
/******************************************/
/*   Database name = nacos_config   */
/*   Table name = config_tags_relation   */
/******************************************/
CREATE TABLE `config_tags_relation`
(
`id` bigint(20) NOT NULL COMMENT 'id',
`tag_name` varchar(128) NOT NULL COMMENT 'tag_name',
`tag_type` varchar(64) DEFAULT NULL COMMENT 'tag_type',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
`tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
`nid` bigint(20) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`nid`),
UNIQUE KEY `uk_configtagrelation_configidtag` (`id`, `tag_name`, `tag_type`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8
COLLATE = utf8_bin COMMENT ='config_tag_relation';
/******************************************/
/* Database name = nacos_config */
/* Table name = group_capacity */
/******************************************/
CREATE TABLE `group_capacity`
(
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',
`group_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Group ID空字符表示整个集群',
`quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额0表示使用默认值',
`usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量',
`max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限单位为字节0表示使用默认值',
`max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数0表示使用默认值',
`max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限单位为字节0表示使用默认值',
`max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_group_id` (`group_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8
COLLATE = utf8_bin COMMENT ='集群、各Group容量信息表';
/******************************************/
/* Database name = nacos_config */
/* Table name = his_config_info */
/******************************************/
CREATE TABLE `his_config_info`
(
`id` bigint(64) unsigned NOT NULL,
`nid` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`data_id` varchar(255) NOT NULL,
`group_id` varchar(128) NOT NULL,
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
`content` longtext NOT NULL,
`md5` varchar(32) DEFAULT NULL,
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
`src_user` text,
`src_ip` varchar(50) DEFAULT NULL,
`op_type` char(10) DEFAULT NULL,
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
PRIMARY KEY (`nid`),
KEY `idx_gmt_create` (`gmt_create`),
KEY `idx_gmt_modified` (`gmt_modified`),
KEY `idx_did` (`data_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8
COLLATE = utf8_bin COMMENT ='多租户改造';
/******************************************/
/* Database name = nacos_config */
/* Table name = tenant_capacity */
/******************************************/
CREATE TABLE `tenant_capacity`
(
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',
`tenant_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Tenant ID',
`quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额0表示使用默认值',
`usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量',
`max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限单位为字节0表示使用默认值',
`max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数',
`max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限单位为字节0表示使用默认值',
`max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_tenant_id` (`tenant_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8
COLLATE = utf8_bin COMMENT ='租户容量信息表';
CREATE TABLE `tenant_info`
(
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`kp` varchar(128) NOT NULL COMMENT 'kp',
`tenant_id` varchar(128) default '' COMMENT 'tenant_id',
`tenant_name` varchar(128) default '' COMMENT 'tenant_name',
`tenant_desc` varchar(256) DEFAULT NULL COMMENT 'tenant_desc',
`create_source` varchar(32) DEFAULT NULL COMMENT 'create_source',
`gmt_create` bigint(20) NOT NULL COMMENT '创建时间',
`gmt_modified` bigint(20) NOT NULL COMMENT '修改时间',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_tenant_info_kptenantid` (`kp`, `tenant_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8
COLLATE = utf8_bin COMMENT ='tenant_info';
CREATE TABLE `users`
(
`username` varchar(50) NOT NULL PRIMARY KEY,
`password` varchar(500) NOT NULL,
`enabled` boolean NOT NULL
);
CREATE TABLE `roles`
(
`username` varchar(50) NOT NULL,
`role` varchar(50) NOT NULL,
UNIQUE INDEX `idx_user_role` (`username` ASC, `role` ASC) USING BTREE
);
CREATE TABLE `permissions`
(
`role` varchar(50) NOT NULL,
`resource` varchar(255) NOT NULL,
`action` varchar(8) NOT NULL,
UNIQUE INDEX `uk_role_permission` (`role`, `resource`, `action`) USING BTREE
);
INSERT INTO users (username, password, enabled)
VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE);
INSERT INTO roles (username, role)
VALUES ('nacos', 'ROLE_ADMIN');

View File

@@ -0,0 +1,21 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Kubernetes standard labels
*/}}
{{- define "uavcloud-middleware.nacos.labels.standard" -}}
cmii.app: {{ .Values.appName }}
cmii.type: {{ .Values.global.application.type }}
helm.sh/chart: {{ include "uavcloud-middleware.chart" . }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
{{- if $.Values.global.image.tag }}
app.kubernetes.io/version: {{ $.Values.global.image.tag | quote }}
{{- end }}
{{- end -}}
{{/*
Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
*/}}
{{- define "uavcloud-middleware.nacos.labels.matchLabels" -}}
cmii.app: {{ .Values.appName }}
cmii.type: {{ .Values.global.application.type }}
{{- end -}}

View File

@@ -0,0 +1,19 @@
{{- if .Values.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName -}}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $applicationName }}-cm
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.nacos.labels.standard" . | nindent 4 }}
data:
  mysql.db.name: "{{ .Values.database.db_name }}"
  mysql.db.host: "{{ .Values.database.host }}"
  mysql.port: "{{ .Values.database.port }}"
  mysql.user: "{{ .Values.database.username }}"
  mysql.password: "{{ .Values.database.password }}"
---
{{- end }}

View File

@@ -0,0 +1,30 @@
{{- if and .Values.enabled .Values.ingress.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName -}}
{{- $DeployDomainName := ternary (first (regexSplit ":" .Values.global.domain.DeployDomainName -1)) ( .Values.global.domain.DeployDomainName ) (contains ":" .Values.global.domain.DeployDomainName) -}}
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.nacos.labels.standard" . | nindent 4 }}
spec:
  rules:
    - host: "nacos.{{ $DeployDomainName }}"
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: {{ $applicationName }}
              servicePort: {{ .Values.containerPort.dashboard }}
  {{- if .Values.global.ingress.tls_enabled }}
  tls:
    - hosts:
        - "nacos.{{ $DeployDomainName }}"
      secretName: "x.{{ $DeployDomainName }}-tls"
  {{- end }}
---
{{- end }}

View File

@@ -0,0 +1,82 @@
{{- if .Values.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName -}}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.nacos.labels.standard" . | nindent 4 }}
spec:
  serviceName: {{ $applicationName }}
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "uavcloud-middleware.nacos.labels.matchLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "uavcloud-middleware.nacos.labels.standard" . | nindent 8 }}
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      {{- if .Values.global.affinity }}
      affinity: {{- include "common.tplvalues.render" (dict "value" .Values.global.affinity "context" $) | nindent 8 }}
      {{- else }}
      affinity:
        {{- /* podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAffinityPreset "context" $) | nindent 10 -}}*/}}
        {{- /* podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAntiAffinityPreset "context" $) | nindent 10 }}*/}}
        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.global.nodeAffinityPreset.type "key" .Values.global.nodeAffinityPreset.key "values" .Values.global.nodeAffinityPreset.values) | nindent 10 -}}
      {{- end }}
      containers:
        - name: nacos-server
          image: "{{ .Values.global.image.repository | default .Values.image.repository }}/nacos-server:{{ .Values.image.tag }}"
          ports:
            - containerPort: {{ .Values.containerPort.dashboard }}
              name: dashboard
          env:
            - name: NACOS_AUTH_ENABLE
              value: "true"
            - name: NACOS_REPLICAS
              value: "1"
            - name: MYSQL_SERVICE_DB_NAME
              valueFrom:
                configMapKeyRef:
                  name: {{ $applicationName }}-cm
                  key: mysql.db.name
            - name: MYSQL_SERVICE_PORT
              valueFrom:
                configMapKeyRef:
                  name: {{ $applicationName }}-cm
                  key: mysql.port
            - name: MYSQL_SERVICE_USER
              valueFrom:
                configMapKeyRef:
                  name: {{ $applicationName }}-cm
                  key: mysql.user
            - name: MYSQL_SERVICE_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: {{ $applicationName }}-cm
                  key: mysql.password
            - name: MYSQL_SERVICE_HOST
              valueFrom:
                configMapKeyRef:
                  name: {{ $applicationName }}-cm
                  key: mysql.db.host
            - name: NACOS_SERVER_PORT
              value: "{{ .Values.containerPort.dashboard }}"
            - name: NACOS_APPLICATION_PORT
              value: "{{ .Values.containerPort.dashboard }}"
            - name: PREFER_HOST_MODE
              value: "hostname"
            - name: MODE
              value: standalone
            - name: SPRING_DATASOURCE_PLATFORM
              value: mysql
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
---
{{- end }}

View File

@@ -0,0 +1,24 @@
{{- if .Values.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName -}}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.nacos.labels.standard" . | nindent 4 }}
spec:
  type: {{ .Values.global.middlewareService.type }}
  selector:
    {{- include "uavcloud-middleware.nacos.labels.matchLabels" . | nindent 4 }}
  ports:
    - port: {{ .Values.containerPort.dashboard }}
      name: server
      targetPort: {{ .Values.containerPort.dashboard }}
      {{- if eq .Values.global.middlewareService.type "NodePort" }}
      nodePort: {{ .Values.nodePort.dashboard }}
      {{- end }}
---
{{- end }}

View File

@@ -0,0 +1,34 @@
image:
  repository: docker.io/nacos
  tag: 2.0.1
replicaCount: 1
ingress:
  enabled: false
enabled: true
appName: helm-nacos
nodePort:
  dashboard: 38848
database:
  host: helm-mysql
  port: 6033
  username: k8s_admin
  password: EWde2cKP9w.G
  db_name: nacos_config
containerPort:
  dashboard: 8848
resources:
  limits:
    memory: 2Gi
    cpu: "1"
  requests:
    memory: 1Gi
    cpu: 500m

View File

@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@@ -0,0 +1,6 @@
dependencies:
- name: common
  repository: https://charts.bitnami.com/bitnami
  version: 1.10.3
digest: sha256:710e8247ae70ea63a2fb2fde4320511ff28c7b5c7a738861427f104a7718bdf4
generated: "2021-12-02T17:29:16.053850737Z"

View File

@@ -0,0 +1,26 @@
annotations:
  category: Infrastructure
apiVersion: v2
appVersion: 3.9.12
dependencies:
- name: common
  repository: https://charts.bitnami.com/bitnami
  tags:
  - bitnami-common
  version: 1.x.x
description: Open source message broker software that implements the Advanced Message
  Queuing Protocol (AMQP)
home: https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq
icon: https://bitnami.com/assets/stacks/rabbitmq/img/rabbitmq-stack-220x234.png
keywords:
- rabbitmq
- message queue
- AMQP
maintainers:
- email: containers@bitnami.com
  name: Bitnami
name: rabbitmq
sources:
- https://github.com/bitnami/bitnami-docker-rabbitmq
- https://www.rabbitmq.com
version: 8.26.1

View File

@@ -0,0 +1,592 @@
# RabbitMQ
[RabbitMQ](https://www.rabbitmq.com/) is an open source multi-protocol message broker.
## TL;DR
```bash
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm install my-release bitnami/rabbitmq
```
## Introduction
This chart bootstraps a [RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/).
## Prerequisites
- Kubernetes 1.12+
- Helm 3.1.0
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
$ helm install my-release bitnami/rabbitmq
```
The command deploys RabbitMQ on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```bash
$ helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Parameters
### Global parameters
| Name | Description | Value |
| ------------------------- | ----------------------------------------------- | ----- |
| `global.imageRegistry` | Global Docker image registry | `""` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
### RabbitMQ Image parameters
| Name | Description | Value |
| ------------------- | -------------------------------------------------------------- | --------------------- |
| `image.registry` | RabbitMQ image registry | `docker.io` |
| `image.repository` | RabbitMQ image repository | `bitnami/rabbitmq` |
| `image.tag` | RabbitMQ image tag (immutable tags are recommended) | `3.9.12-debian-10-r0` |
| `image.pullPolicy` | RabbitMQ image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `image.debug` | Set to true if you would like to see extra information on logs | `false` |
### Common parameters
| Name | Description | Value |
| ---------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- |
| `nameOverride` | String to partially override rabbitmq.fullname template (will maintain the release name) | `""` |
| `fullnameOverride` | String to fully override rabbitmq.fullname template | `""` |
| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` |
| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` |
| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` |
| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` |
| `hostAliases` | Deployment pod host aliases | `[]` |
| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
| `auth.username` | RabbitMQ application username | `user` |
| `auth.password` | RabbitMQ application password | `""` |
| `auth.existingPasswordSecret` | Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) | `""` |
| `auth.erlangCookie` | Erlang cookie to determine whether different nodes are allowed to communicate with each other | `""` |
| `auth.existingErlangSecret` | Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) | `""` |
| `auth.tls.enabled` | Enable TLS support on RabbitMQ | `false` |
| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` |
| `auth.tls.failIfNoPeerCert` | When set to true, TLS connection will be rejected if client fails to provide a certificate | `true` |
| `auth.tls.sslOptionsVerify` | Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? | `verify_peer` |
| `auth.tls.caCertificate` | Certificate Authority (CA) bundle content | `""` |
| `auth.tls.serverCertificate` | Server certificate content | `""` |
| `auth.tls.serverKey` | Server private key content | `""` |
| `auth.tls.existingSecret` | Existing secret with certificate content to RabbitMQ credentials | `""` |
| `auth.tls.existingSecretFullChain` | Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. | `false` |
| `logs` | Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable | `-` |
| `ulimitNofiles` | RabbitMQ Max File Descriptors | `65536` |
| `maxAvailableSchedulers` | RabbitMQ maximum available scheduler threads | `""` |
| `onlineSchedulers` | RabbitMQ online scheduler threads | `""` |
| `memoryHighWatermark.enabled` | Enable configuring Memory high watermark on RabbitMQ | `false` |
| `memoryHighWatermark.type` | Memory high watermark type. Either `absolute` or `relative` | `relative` |
| `memoryHighWatermark.value` | Memory high watermark value | `0.4` |
| `plugins` | List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) | `rabbitmq_management rabbitmq_peer_discovery_k8s` |
| `communityPlugins` | List of Community plugins (URLs) to be downloaded during container initialization | `""` |
| `extraPlugins` | Extra plugins to enable (single string containing a space-separated list) | `rabbitmq_auth_backend_ldap` |
| `clustering.enabled` | Enable RabbitMQ clustering | `true` |
| `clustering.addressType` | Switch clustering mode. Either `ip` or `hostname` | `hostname` |
| `clustering.rebalance` | Rebalance master for queues in cluster when new replica is created | `false` |
| `clustering.forceBoot` | Force boot of an unexpectedly shut down cluster (in an unexpected order). | `false` |
| `clustering.partitionHandling` | Switch Partition Handling Strategy. Either `autoheal` or `pause-minority` or `pause-if-all-down` or `ignore` | `autoheal` |
| `loadDefinition.enabled` | Enable loading a RabbitMQ definitions file to configure RabbitMQ | `false` |
| `loadDefinition.existingSecret` | Existing secret with the load definitions file | `""` |
| `command` | Override default container command (useful when using custom images) | `[]` |
| `args` | Override default container args (useful when using custom images) | `[]` |
| `terminationGracePeriodSeconds` | Default duration in seconds k8s waits for container to exit before sending kill signal. | `120` |
| `extraEnvVars` | Extra environment variables to add to RabbitMQ pods | `[]` |
| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` |
| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables (in case of sensitive data) | `""` |
| `extraContainerPorts` | Extra ports to be included in container spec, primarily informational | `[]` |
| `configuration` | RabbitMQ Configuration file content: required cluster configuration | `""` |
| `extraConfiguration` | Configuration file content: extra configuration to be appended to RabbitMQ configuration | `""` |
| `advancedConfiguration` | Configuration file content: advanced configuration | `""` |
| `ldap.enabled` | Enable LDAP support | `false` |
| `ldap.servers` | List of LDAP servers hostnames | `[]` |
| `ldap.port` | LDAP servers port | `389` |
| `ldap.user_dn_pattern` | Pattern used to translate the provided username into a value to be used for the LDAP bind | `cn=${username},dc=example,dc=org` |
| `ldap.tls.enabled` | If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter | `false` |
| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts | `[]` |
| `extraVolumes` | Optionally specify extra list of additional volumes . | `[]` |
| `extraSecrets` | Optionally specify extra secrets to be created by the chart. | `{}` |
| `extraSecretsPrependReleaseName` | Set this flag to true if extraSecrets should be created with <release-name> prepended. | `false` |
### Statefulset parameters
| Name | Description | Value |
| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | --------------- |
| `replicaCount` | Number of RabbitMQ replicas to deploy | `1` |
| `schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` |
| `podManagementPolicy` | Pod management policy | `OrderedReady` |
| `podLabels` | RabbitMQ Pod labels. Evaluated as a template | `{}` |
| `podAnnotations` | RabbitMQ Pod annotations. Evaluated as a template | `{}` |
| `updateStrategyType` | Update strategy type for RabbitMQ statefulset | `RollingUpdate` |
| `statefulsetLabels` | RabbitMQ statefulset labels. Evaluated as a template | `{}` |
| `priorityClassName` | Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand | `""` |
| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `nodeAffinityPreset.key`             | Node label key to match. Ignored if `affinity` is set.                                                                    | `""`            |
| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
| `affinity` | Affinity for pod assignment. Evaluated as a template | `{}` |
| `nodeSelector` | Node labels for pod assignment. Evaluated as a template | `{}` |
| `tolerations` | Tolerations for pod assignment. Evaluated as a template | `[]` |
| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `podSecurityContext.enabled` | Enable RabbitMQ pods' Security Context | `true` |
| `podSecurityContext.fsGroup` | Group ID for the filesystem used by the containers | `1001` |
| `podSecurityContext.runAsUser` | User ID for the service user running the pod | `1001` |
| `containerSecurityContext` | RabbitMQ containers' Security Context | `{}` |
| `resources.limits` | The resources limits for RabbitMQ containers | `{}` |
| `resources.requests` | The requested resources for RabbitMQ containers | `{}` |
| `livenessProbe.enabled` | Enable livenessProbe | `true` |
| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` |
| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` |
| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `20` |
| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `readinessProbe.enabled` | Enable readinessProbe | `true` |
| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` |
| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `30` |
| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `20` |
| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` |
| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `customLivenessProbe` | Override default liveness probe | `{}` |
| `customReadinessProbe` | Override default readiness probe | `{}` |
| `customStartupProbe` | Define a custom startup probe | `{}` |
| `initContainers` | Add init containers to the RabbitMQ pod | `[]` |
| `sidecars` | Add sidecar containers to the RabbitMQ pod | `[]` |
| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` |
| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` |
| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` |
### RBAC parameters
| Name | Description | Value |
| --------------------------------------------- | --------------------------------------------------- | ------ |
| `serviceAccount.create` | Enable creation of ServiceAccount for RabbitMQ pods | `true` |
| `serviceAccount.name` | Name of the created serviceAccount | `""` |
| `serviceAccount.automountServiceAccountToken` | Auto-mount the service account token in the pod | `true` |
| `rbac.create` | Whether RBAC rules should be created | `true` |
### Persistence parameters
| Name | Description | Value |
| --------------------------- | ------------------------------------------------ | --------------- |
| `persistence.enabled` | Enable RabbitMQ data persistence using PVC | `true` |
| `persistence.storageClass` | PVC Storage Class for RabbitMQ data volume | `""` |
| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` |
| `persistence.accessMode` | PVC Access Mode for RabbitMQ data volume | `ReadWriteOnce` |
| `persistence.existingClaim` | Provide an existing PersistentVolumeClaims | `""` |
| `persistence.size` | PVC Storage Request for RabbitMQ data volume | `8Gi` |
| `persistence.volumes` | Additional volumes without creating PVC | `[]` |
| `persistence.annotations` | Persistence annotations. Evaluated as a template | `{}` |
### Exposure parameters
| Name | Description | Value |
| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ |
| `service.type` | Kubernetes Service type | `ClusterIP` |
| `service.portEnabled` | Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. | `true` |
| `service.port` | Amqp port | `5672` |
| `service.portName` | Amqp service port name | `amqp` |
| `service.tlsPort` | Amqp TLS port | `5671` |
| `service.tlsPortName` | Amqp TLS service port name | `amqp-ssl` |
| `service.nodePort` | Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` | `""` |
| `service.tlsNodePort` | Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` | `""` |
| `service.distPort` | Erlang distribution server port | `25672` |
| `service.distPortName` | Erlang distribution service port name | `dist` |
| `service.distNodePort` | Node port override for `dist` port, if serviceType is `NodePort` | `""` |
| `service.managerPortEnabled` | RabbitMQ Manager port | `true` |
| `service.managerPort` | RabbitMQ Manager port | `15672` |
| `service.managerPortName` | RabbitMQ Manager service port name | `http-stats` |
| `service.managerNodePort` | Node port override for `http-stats` port, if serviceType `NodePort` | `""` |
| `service.metricsPort`              | RabbitMQ Prometheus metrics port                                                                                                   | `9419`                   |
| `service.metricsPortName`          | RabbitMQ Prometheus metrics service port name                                                                                      | `metrics`                |
| `service.metricsNodePort` | Node port override for `metrics` port, if serviceType is `NodePort` | `""` |
| `service.epmdNodePort` | Node port override for `epmd` port, if serviceType is `NodePort` | `""` |
| `service.epmdPortName` | EPMD Discovery service port name | `epmd` |
| `service.extraPorts` | Extra ports to expose in the service | `[]` |
| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is `LoadBalancer` | `[]` |
| `service.externalIPs` | Set the ExternalIPs | `[]` |
| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
| `service.loadBalancerIP` | Set the LoadBalancerIP | `""` |
| `service.labels` | Service labels. Evaluated as a template | `{}` |
| `service.annotations` | Service annotations. Evaluated as a template | `{}` |
| `service.annotationsHeadless` | Headless Service annotations. Evaluated as a template | `{}` |
| `ingress.enabled` | Enable ingress resource for Management console | `false` |
| `ingress.path` | Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` |
| `ingress.pathType` | Ingress path type | `ImplementationSpecific` |
| `ingress.hostname` | Default host for the ingress resource | `rabbitmq.local` |
| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` |
| `ingress.selfSigned` | Set this to true in order to create a TLS secret for this ingress record | `false` |
| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` |
| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` |
| `ingress.secrets` | Custom TLS certificates as secrets | `[]` |
| `ingress.ingressClassName`         | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)                                                         | `""`                     |
| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` |
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
| `networkPolicy.additionalRules` | Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. | `[]` |
### Metrics Parameters
| Name | Description | Value |
| ------------------------------------------ | ------------------------------------------------------------------------------------------------------ | --------------------- |
| `metrics.enabled` | Enable exposing RabbitMQ metrics to be gathered by Prometheus | `false` |
| `metrics.plugins` | Plugins to enable Prometheus metrics in RabbitMQ | `rabbitmq_prometheus` |
| `metrics.podAnnotations` | Annotations for enabling prometheus to access the metrics endpoint | `{}` |
| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` |
| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` |
| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` |
| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` |
| `metrics.serviceMonitor.relabellings` | MetricsRelabelConfigs to apply to samples before ingestion. DEPRECATED: Will be removed in next major. | `[]` |
| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` |
| `metrics.serviceMonitor.metricRelabelings` | MetricsRelabelConfigs to apply to samples before ingestion. | `[]` |
| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` |
| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are required by the installed Prometheus Operator | `{}` |
| `metrics.serviceMonitor.targetLabels` | Used to keep given service's labels in target | `{}` |
| `metrics.serviceMonitor.podTargetLabels` | Used to keep given pod's labels in target | `{}` |
| `metrics.serviceMonitor.path` | Define the path used by ServiceMonitor to scrap metrics | `""` |
| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` |
| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` |
| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `""` |
| `metrics.prometheusRule.rules` | List of rules, used as template by Helm. | `[]` |
### Init Container Parameters
| Name | Description | Value |
| -------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ----------------------- |
| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r301` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` |
| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` |
The above parameters map to the env variables defined in [bitnami/rabbitmq](https://github.com/bitnami/bitnami-docker-rabbitmq). For more information please refer to the [bitnami/rabbitmq](https://github.com/bitnami/bitnami-docker-rabbitmq) image documentation.
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```bash
$ helm install my-release \
--set auth.username=admin,auth.password=secretpassword,auth.erlangCookie=secretcookie \
bitnami/rabbitmq
```
The above command sets the RabbitMQ admin username and password to `admin` and `secretpassword` respectively. Additionally the secure erlang cookie is set to `secretcookie`.
> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```bash
$ helm install my-release -f values.yaml bitnami/rabbitmq
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Configuration and installation details
### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
### Set pod affinity
This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
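For instance, a soft node-affinity preset could be requested at install time. The sketch below uses the well-known `topology.kubernetes.io/zone` label; the zone value is only an illustrative placeholder for a label that actually exists on your nodes:
```bash
$ helm install my-release bitnami/rabbitmq \
    --set nodeAffinityPreset.type=soft \
    --set nodeAffinityPreset.key=topology.kubernetes.io/zone \
    --set "nodeAffinityPreset.values[0]=zone-a"
```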
### Scale horizontally
To horizontally scale this chart once it has been deployed, two options are available:
- Use the `kubectl scale` command.
- Upgrade the chart modifying the `replicaCount` parameter.
> NOTE: It is mandatory to specify the password and Erlang cookie that was set the first time the chart was installed when upgrading the chart.
When scaling down the solution, unnecessary RabbitMQ nodes are automatically stopped, but they are not removed from the cluster. You need to manually remove them by running the `rabbitmqctl forget_cluster_node` command.
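As a sketch, scaling an existing release to three replicas could look like the following; `PASSWORD` and `ERLANG_COOKIE` stand for the values recorded at first install, and the pod and node names in the cleanup step are illustrative:
```bash
$ helm upgrade my-release bitnami/rabbitmq \
    --set replicaCount=3 \
    --set auth.password=PASSWORD \
    --set auth.erlangCookie=ERLANG_COOKIE

# After scaling back down, drop the stopped node from the cluster metadata,
# replacing the pod and node names with your own:
$ kubectl exec my-release-rabbitmq-0 -- rabbitmqctl forget_cluster_node rabbit@NODE_NAME
```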
Refer to the chart documentation for [more information on scaling the Rabbit cluster horizontally](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/scale-deployment/).
### Enable TLS support
To enable TLS support, first generate the certificates as described in the [RabbitMQ documentation for SSL certificate generation](https://www.rabbitmq.com/ssl.html#automated-certificate-generation).
Once the certificates are generated, you have two alternatives:
* Create a secret with the certificates and associate the secret when deploying the chart
* Include the certificates in the *values.yaml* file when deploying the chart
Set the *auth.tls.failIfNoPeerCert* parameter to *false* to allow a TLS connection if the client fails to provide a certificate.
Set the *auth.tls.sslOptionsVerify* to *verify_peer* to force a node to perform peer verification. When set to *verify_none*, peer verification will be disabled and certificate exchange won't be performed.
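As a minimal sketch, TLS can be enabled with the auto-generated self-signed certificates described in the parameters table (a production deployment would normally supply its own certificates, e.g. via `auth.tls.existingSecret`):
```bash
$ helm install my-release bitnami/rabbitmq \
    --set auth.tls.enabled=true \
    --set auth.tls.autoGenerated=true \
    --set auth.tls.failIfNoPeerCert=false \
    --set auth.tls.sslOptionsVerify=verify_peer
```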
Refer to the chart documentation for [more information and examples of enabling TLS and using Let's Encrypt certificates](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/enable-tls-ingress/).
### Load custom definitions
It is possible to [load a RabbitMQ definitions file to configure RabbitMQ](https://www.rabbitmq.com/management.html#load-definitions).
Because definitions may contain RabbitMQ credentials, [store the JSON as a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). Within the secret's data, choose a key name that corresponds with the desired load definitions filename (i.e. `load_definition.json`) and use the JSON object as the value.
Next, specify the `load_definitions` property as an `extraConfiguration` pointing to the load definition file path within the container (i.e. `/app/load_definition.json`) and set `loadDefinition.enabled` to `true`. Any load definitions specified will be available within the container at `/app`.
> NOTE: Loading a definition will take precedence over any configuration done through [Helm values](#parameters).
If needed, you can use `extraSecrets` to let the chart create the secret for you. This way, you don't need to manually create it before deploying a release. These secrets can also be templated to use supplied chart values.
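A possible flow, assuming a local `load_definition.json` file and using `load-definition` as the secret name (both purely illustrative), might be:
```bash
# Store the definitions file in a secret; the key name becomes the filename under /app.
$ kubectl create secret generic load-definition \
    --from-file=load_definition.json=./load_definition.json

$ helm install my-release bitnami/rabbitmq \
    --set loadDefinition.enabled=true \
    --set loadDefinition.existingSecret=load-definition \
    --set "extraConfiguration=load_definitions = /app/load_definition.json"
```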
Refer to the chart documentation for [more information and configuration examples of loading custom definitions](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/load-files/).
### Configure LDAP support
LDAP support can be enabled in the chart by specifying the `ldap.*` parameters while creating a release. Refer to the chart documentation for [more information and a configuration example](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/configure-ldap/).
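A hedged sketch, using a hypothetical LDAP host and DN pattern, could look like this (the single quotes keep `${username}` literal and `\,` escapes the commas for `--set`):
```bash
$ helm install my-release bitnami/rabbitmq \
    --set ldap.enabled=true \
    --set "ldap.servers[0]=ldap.example.org" \
    --set ldap.port=389 \
    --set 'ldap.user_dn_pattern=cn=${username}\,dc=example\,dc=org'
```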
### Configure memory high watermark
It is possible to configure a memory high watermark on RabbitMQ to define [memory thresholds](https://www.rabbitmq.com/memory.html#threshold) using the `memoryHighWatermark.*` parameters. To do so, you have two alternatives:
* Set an absolute limit of RAM to be used on each RabbitMQ node, as shown in the configuration example below:
```
memoryHighWatermark.enabled="true"
memoryHighWatermark.type="absolute"
memoryHighWatermark.value="512MB"
```
* Set a relative limit of RAM to be used on each RabbitMQ node. To enable this feature, define the memory limits at pod level too. An example configuration is shown below:
```
memoryHighWatermark.enabled="true"
memoryHighWatermark.type="relative"
memoryHighWatermark.value="0.4"
resources.limits.memory="2Gi"
```
### Add extra environment variables
In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property.
```yaml
extraEnvVars:
- name: LOG_LEVEL
value: error
```
Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `.extraEnvVarsCM` or the `extraEnvVarsSecret` properties.
### Use plugins
The Bitnami Docker RabbitMQ image ships with a set of plugins. By default, this chart enables `rabbitmq_management` and `rabbitmq_peer_discovery_k8s`, since they are required for RabbitMQ to work on K8s.
To enable extra plugins, set the `extraPlugins` parameter with the list of plugins you want to enable. In addition to this, the `communityPlugins` parameter can be used to specify a list of URLs (separated by spaces) for custom plugins for RabbitMQ.
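For example, plugins that ship with the RabbitMQ distribution can be enabled via `extraPlugins`, and community plugins can be pulled from a URL (the URL below is a placeholder):
```bash
$ helm install my-release bitnami/rabbitmq \
    --set extraPlugins="rabbitmq_shovel rabbitmq_shovel_management" \
    --set communityPlugins="https://example.com/my-custom-plugin-v1.0.ez"
```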
Refer to the chart documentation for [more information on using RabbitMQ plugins](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/use-plugins/).
### Recover the cluster from complete shutdown
> IMPORTANT: Some of these procedures can lead to data loss. Always make a backup beforehand.
The RabbitMQ cluster is able to support multiple node failures but, in a situation in which all the nodes are brought down at the same time, the cluster might not be able to self-recover.
This happens if the pod management policy of the statefulset is not `Parallel` and the last pod to be running wasn't the first pod of the statefulset. If that happens, update the pod management policy to recover a healthy state:
```console
$ kubectl delete statefulset STATEFULSET_NAME --cascade=false
$ helm upgrade RELEASE_NAME bitnami/rabbitmq \
--set podManagementPolicy=Parallel \
--set replicaCount=NUMBER_OF_REPLICAS \
--set auth.password=PASSWORD \
--set auth.erlangCookie=ERLANG_COOKIE
```
For a faster resynchronization of the nodes, you can temporarily disable the readiness probe by setting `readinessProbe.enabled=false`. Bear in mind that the pods will be exposed before they are actually ready to process requests.
If the steps above don't bring the cluster to a healthy state, it could be possible that none of the RabbitMQ nodes think they were the last node to be up during the shutdown. In those cases, you can force the boot of the nodes by specifying the `clustering.forceBoot=true` parameter (which will execute [`rabbitmqctl force_boot`](https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot) in each pod):
```console
$ helm upgrade RELEASE_NAME bitnami/rabbitmq \
--set podManagementPolicy=Parallel \
--set clustering.forceBoot=true \
--set replicaCount=NUMBER_OF_REPLICAS \
--set auth.password=PASSWORD \
--set auth.erlangCookie=ERLANG_COOKIE
```
More information: [Clustering Guide: Restarting](https://www.rabbitmq.com/clustering.html#restarting).
### Known issues
- Changing the password through RabbitMQ's UI can make the pod fail due to the default liveness probes. If you do so, remember to make the chart aware of the new password. Updating the default secret with the password you set through RabbitMQ's UI will automatically recreate the pods. If you are using your own secret, you may have to manually recreate the pods.
## Persistence
The [Bitnami RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) image stores the RabbitMQ data and configurations at the `/opt/bitnami/rabbitmq/var/lib/rabbitmq/` path of the container.
The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined.
### Use existing PersistentVolumeClaims
1. Create the PersistentVolume
1. Create the PersistentVolumeClaim
1. Install the chart
```bash
$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/rabbitmq
```
### Adjust permissions of the persistence volume mountpoint
As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
As an alternative, this chart supports using an `initContainer` to change the ownership of the volume before mounting it in the final destination.
You can enable this `initContainer` by setting `volumePermissions.enabled` to `true`.
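A short sketch enabling that init container:
```bash
$ helm install my-release bitnami/rabbitmq --set volumePermissions.enabled=true
```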
### Configure the default user/vhost
If you want to create default user/vhost and set the default permission. you can use `extraConfiguration`:
```yaml
auth:
username: default-user
extraConfiguration: |-
default_vhost = default-vhost
default_permissions.configure = .*
default_permissions.read = .*
default_permissions.write = .*
```
## Troubleshooting
Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
## Upgrading
It's necessary to set the `auth.password` and `auth.erlangCookie` parameters when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password and the cookie, and run the command below to upgrade your chart:
```bash
$ helm upgrade my-release bitnami/rabbitmq --set auth.password=[PASSWORD] --set auth.erlangCookie=[RABBITMQ_ERLANG_COOKIE]
```
> Note: you need to substitute the placeholders [PASSWORD] and [RABBITMQ_ERLANG_COOKIE] with the values obtained in the installation notes.
### To 8.21.0
This new version of the chart bumps the RabbitMQ version to `3.9.1`. It is considered a minor release, and no breaking changes are expected. Additionally, RabbitMQ `3.9.X` nodes can run alongside `3.8.X` nodes.
See the [Upgrading guide](https://www.rabbitmq.com/upgrade.html) and the [RabbitMQ change log](https://www.rabbitmq.com/changelog.html) for further documentation.
### To 8.0.0
[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/upgrade-helm3/).
### To 7.0.0
- Several parameters were renamed or disappeared in favor of new ones on this major version:
- `replicas` is renamed to `replicaCount`.
- `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`.
- Authentication parameters were reorganized under the `auth.*` parameter:
- `rabbitmq.username`, `rabbitmq.password`, and `rabbitmq.erlangCookie` are now `auth.username`, `auth.password`, and `auth.erlangCookie` respectively.
- `rabbitmq.tls.*` parameters are now under `auth.tls.*`.
- Parameters prefixed with `rabbitmq.` were renamed removing the prefix. E.g. `rabbitmq.configuration` -> renamed to `configuration`.
- `rabbitmq.rabbitmqClusterNodeName` is deprecated.
- `rabbitmq.setUlimitNofiles` is deprecated.
- `forceBoot.enabled` is renamed to `clustering.forceBoot`.
- `loadDefinition.secretName` is renamed to `loadDefinition.existingSecret`.
- `metrics.port` is renamed to `service.metricsPort`.
- `service.extraContainerPorts` is renamed to `extraContainerPorts`.
- `service.nodeTlsPort` is renamed to `service.tlsNodePort`.
- `podDisruptionBudget` is deprecated in favor of `pdb.create`, `pdb.minAvailable`, and `pdb.maxUnavailable`.
- `rbacEnabled` -> deprecated in favor of `rbac.create`.
- New parameters: `serviceAccount.create`, and `serviceAccount.name`.
- New parameters: `memoryHighWatermark.enabled`, `memoryHighWatermark.type`, and `memoryHighWatermark.value`.
- Chart labels and Ingress configuration were adapted to follow the Helm charts best practices.
- Initialization logic now relies on the container.
- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade.
Consequences:
- Backwards compatibility is not guaranteed.
- Compatibility with non Bitnami images is not guaranteed anymore.
### To 6.0.0
This new version updates the RabbitMQ image to a [new version based on bash instead of node.js](https://github.com/bitnami/bitnami-docker-rabbitmq#3715-r18-3715-ol-7-r19). However, since this chart overwrites the container's command, the changes to the container shouldn't affect the chart. To upgrade, you may need to enable the `fastBoot` option, as was already the case when upgrading from 5.X to 5.Y.
### To 5.0.0
This major release changes the clustering method from `ip` to `hostname`.
This change is needed to fix the persistence. The data dir will now depend on the hostname which is stable instead of the pod IP that might change.
> IMPORTANT: Note that if you upgrade from a previous version you will lose your data.
### To 3.0.0
Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is rabbitmq:
```console
$ kubectl delete statefulset rabbitmq --cascade=false
```
## Bitnami Kubernetes Documentation
Bitnami Kubernetes documentation is available at [https://docs.bitnami.com/](https://docs.bitnami.com/). You can find there the following resources:
- [Documentation for RabbitMQ Helm chart](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/)
- [Get Started with Kubernetes guides](https://docs.bitnami.com/kubernetes/)
- [Bitnami Helm charts documentation](https://docs.bitnami.com/kubernetes/apps/)
- [Kubernetes FAQs](https://docs.bitnami.com/kubernetes/faq/)
- [Kubernetes Developer guides](https://docs.bitnami.com/tutorials/)
## License
Copyright &copy; 2022 Bitnami
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,163 @@
CHART NAME: {{ .Chart.Name }}
CHART VERSION: {{ .Chart.Version }}
APP VERSION: {{ .Chart.AppVersion }}
{{- $servicePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.port .Values.service.tlsPort -}}
{{- $serviceNodePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.nodePort .Values.service.tlsNodePort -}}
** Please be patient while the chart is being deployed **
{{- if .Values.diagnosticMode.enabled }}
The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
Get the list of pods by executing:
kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
Access the pod you want to debug by executing
kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash
In order to replicate the container startup scripts execute this command:
/opt/bitnami/scripts/rabbitmq/entrypoint.sh /opt/bitnami/scripts/rabbitmq/run.sh
{{- else }}
Credentials:
{{- if not .Values.loadDefinition.enabled }}
echo "Username : {{ .Values.auth.username }}"
echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretPasswordName" . }} -o jsonpath="{.data.rabbitmq-password}" | base64 --decode)"
{{- end }}
echo "ErLang Cookie : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretErlangName" . }} -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 --decode)"
Note that the credentials are saved in persistent volume claims and will not be changed upon upgrade or reinstallation unless the persistent volume claim has been deleted. If this is not the first installation of this chart, the credentials may not be valid.
This is applicable when no passwords are set and therefore the random password is autogenerated. In case of using a fixed password, you should specify it when upgrading.
More information about the credentials may be found at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases.
RabbitMQ can be accessed within the cluster on port {{ $serviceNodePort }} at {{ include "rabbitmq.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clustering.k8s_domain }}
To access it from outside the cluster, perform the following steps:
{{- if .Values.ingress.enabled }}
{{- if contains "NodePort" .Values.service.type }}
To Access the RabbitMQ AMQP port:
1. Obtain the NodePort IP and ports:
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }})
echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/"
{{- else if contains "LoadBalancer" .Values.service.type }}
To Access the RabbitMQ AMQP port:
1. Obtain the LoadBalancer IP:
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/"
{{- else if contains "ClusterIP" .Values.service.type }}
To Access the RabbitMQ AMQP port:
1. Create a port-forward to the AMQP port:
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }} &
echo "URL : amqp://127.0.0.1:{{ $servicePort }}/"
{{- end }}
2. Access RabbitMQ using the obtained URL.
To Access the RabbitMQ Management interface:
1. Get the RabbitMQ Management URL and associate its hostname to your cluster external IP:
export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on other K8s clusters
echo "RabbitMQ Management: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/"
echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts
2. Open a browser and access RabbitMQ Management using the obtained URL.
{{- else }}
{{- if contains "NodePort" .Values.service.type }}
Obtain the NodePort IP and ports:
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }})
export NODE_PORT_STATS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[3].nodePort}" services {{ include "rabbitmq.fullname" . }})
To Access the RabbitMQ AMQP port:
echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/"
To Access the RabbitMQ Management interface:
echo "URL : http://$NODE_IP:$NODE_PORT_STATS/"
{{- else if contains "LoadBalancer" .Values.service.type }}
Obtain the LoadBalancer IP:
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
To Access the RabbitMQ AMQP port:
echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/"
To Access the RabbitMQ Management interface:
echo "URL : http://$SERVICE_IP:{{ .Values.service.managerPort }}/"
{{- else if contains "ClusterIP" .Values.service.type }}
To Access the RabbitMQ AMQP port:
echo "URL : amqp://127.0.0.1:{{ $servicePort }}/"
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }}
To Access the RabbitMQ Management interface:
echo "URL : http://127.0.0.1:{{ .Values.service.managerPort }}/"
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.managerPort }}:{{ .Values.service.managerPort }}
{{- end }}
{{- end }}
{{- if .Values.metrics.enabled }}
To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus URL by running:
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.metricsPort }}:{{ .Values.service.metricsPort }} &
echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.service.metricsPort }}/metrics"
Then, open the obtained URL in a browser.
{{- end }}
{{- include "common.warnings.rollingTag" .Values.image }}
{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
{{- include "rabbitmq.validateValues" . -}}
{{- $requiredPassword := list -}}
{{- $secretNameRabbitmq := include "rabbitmq.secretPasswordName" . -}}
{{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) -}}
{{- $requiredRabbitmqPassword := dict "valueKey" "auth.password" "secret" $secretNameRabbitmq "field" "rabbitmq-password" -}}
{{- $requiredPassword = append $requiredPassword $requiredRabbitmqPassword -}}
{{- end -}}
{{- end }}
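Note: as a concrete illustration of the access instructions above, here is a minimal sketch for the default ClusterIP case — assuming the release is named "rabbitmq", runs in the "default" namespace, keeps the default 5672/15672 ports, and uses the upstream default "user" account (all of these are assumptions, not values taken from this chart):

    RABBITMQ_PASSWORD=$(kubectl get secret --namespace default rabbitmq \
      -o jsonpath="{.data.rabbitmq-password}" | base64 --decode)
    kubectl port-forward --namespace default svc/rabbitmq 5672:5672 15672:15672 &
    echo "AMQP:       amqp://user:${RABBITMQ_PASSWORD}@127.0.0.1:5672/"
    echo "Management: http://127.0.0.1:15672/"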

View File

@@ -0,0 +1,257 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "rabbitmq.name" -}}
{{- include "common.names.name" . -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "rabbitmq.fullname" -}}
{{- include "common.names.fullname" . -}}
{{- end -}}
{{/*
Return the proper RabbitMQ image name
*/}}
{{- define "rabbitmq.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper image name (for the init container volume-permissions image)
*/}}
{{- define "rabbitmq.volumePermissions.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper Docker Image Registry Secret Names
*/}}
{{- define "rabbitmq.imagePullSecrets" -}}
{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) }}
{{- end -}}
{{/*
Return podAnnotations
*/}}
{{- define "rabbitmq.podAnnotations" -}}
{{- if .Values.podAnnotations }}
{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }}
{{- end }}
{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }}
{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }}
{{- end }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "rabbitmq.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "rabbitmq.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Get the password secret.
*/}}
{{- define "rabbitmq.secretPasswordName" -}}
{{- if .Values.auth.existingPasswordSecret -}}
{{- printf "%s" (tpl .Values.auth.existingPasswordSecret $) -}}
{{- else -}}
{{- printf "%s" (include "rabbitmq.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Get the erlang secret.
*/}}
{{- define "rabbitmq.secretErlangName" -}}
{{- if .Values.auth.existingErlangSecret -}}
{{- printf "%s" (tpl .Values.auth.existingErlangSecret $) -}}
{{- else -}}
{{- printf "%s" (include "rabbitmq.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Get the TLS secret.
*/}}
{{- define "rabbitmq.tlsSecretName" -}}
{{- if .Values.auth.tls.existingSecret -}}
{{- printf "%s" (tpl .Values.auth.tls.existingSecret $) -}}
{{- else -}}
{{- printf "%s-certs" (include "rabbitmq.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return true if a TLS credentials secret object should be created
*/}}
{{- define "rabbitmq.createTlsSecret" -}}
{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.existingSecret) }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Return the proper RabbitMQ plugin list
*/}}
{{- define "rabbitmq.plugins" -}}
{{- $plugins := .Values.plugins -}}
{{- if .Values.extraPlugins -}}
{{- $plugins = printf "%s %s" $plugins .Values.extraPlugins -}}
{{- end -}}
{{- if .Values.metrics.enabled -}}
{{- $plugins = printf "%s %s" $plugins .Values.metrics.plugins -}}
{{- end -}}
{{- printf "%s" $plugins | replace " " ", " -}}
{{- end -}}
{{/*
Return the number of bytes given a value
following a base 2 or base 10 number system.
Usage:
{{ include "rabbitmq.toBytes" .Values.path.to.the.Value }}
*/}}
{{- define "rabbitmq.toBytes" -}}
{{- $value := int (regexReplaceAll "([0-9]+).*" . "${1}") }}
{{- $unit := regexReplaceAll "[0-9]+(.*)" . "${1}" }}
{{- if eq $unit "Ki" }}
{{- mul $value 1024 }}
{{- else if eq $unit "Mi" }}
{{- mul $value 1024 1024 }}
{{- else if eq $unit "Gi" }}
{{- mul $value 1024 1024 1024 }}
{{- else if eq $unit "Ti" }}
{{- mul $value 1024 1024 1024 1024 }}
{{- else if eq $unit "Pi" }}
{{- mul $value 1024 1024 1024 1024 1024 }}
{{- else if eq $unit "Ei" }}
{{- mul $value 1024 1024 1024 1024 1024 1024 }}
{{- else if eq $unit "K" }}
{{- mul $value 1000 }}
{{- else if eq $unit "M" }}
{{- mul $value 1000 1000 }}
{{- else if eq $unit "G" }}
{{- mul $value 1000 1000 1000 }}
{{- else if eq $unit "T" }}
{{- mul $value 1000 1000 1000 1000 }}
{{- else if eq $unit "P" }}
{{- mul $value 1000 1000 1000 1000 1000 }}
{{- else if eq $unit "E" }}
{{- mul $value 1000 1000 1000 1000 1000 1000 }}
{{- end }}
{{- end -}}
{{/*
Return true if cert-manager required annotations for TLS signed certificates are set in the Ingress annotations
Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
*/}}
{{- define "rabbitmq.ingress.certManagerRequest" -}}
{{ if or (hasKey . "cert-manager.io/cluster-issuer") (hasKey . "cert-manager.io/issuer") }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Compile all warnings into a single message, and call fail.
*/}}
{{- define "rabbitmq.validateValues" -}}
{{- $messages := list -}}
{{- $messages := append $messages (include "rabbitmq.validateValues.ldap" .) -}}
{{- $messages := append $messages (include "rabbitmq.validateValues.memoryHighWatermark" .) -}}
{{- $messages := append $messages (include "rabbitmq.validateValues.ingress.tls" .) -}}
{{- $messages := append $messages (include "rabbitmq.validateValues.auth.tls" .) -}}
{{- $messages := without $messages "" -}}
{{- $message := join "\n" $messages -}}
{{- if $message -}}
{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
{{- end -}}
{{- end -}}
{{/*
Validate values of rabbitmq - LDAP support
*/}}
{{- define "rabbitmq.validateValues.ldap" -}}
{{- if .Values.ldap.enabled }}
{{- $serversListLength := len .Values.ldap.servers }}
{{- if or (not (gt $serversListLength 0)) (not (and .Values.ldap.port .Values.ldap.user_dn_pattern)) }}
rabbitmq: LDAP
Invalid LDAP configuration. When enabling LDAP support, the parameters "ldap.servers",
"ldap.port", and "ldap. user_dn_pattern" are mandatory. Please provide them:
$ helm install {{ .Release.Name }} bitnami/rabbitmq \
--set ldap.enabled=true \
--set ldap.servers[0]="my-ldap-server" \
--set ldap.port="389" \
--set ldap.user_dn_pattern="cn=${username},dc=example,dc=org"
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Validate values of rabbitmq - Memory high watermark
*/}}
{{- define "rabbitmq.validateValues.memoryHighWatermark" -}}
{{- if and (not (eq .Values.memoryHighWatermark.type "absolute")) (not (eq .Values.memoryHighWatermark.type "relative")) }}
rabbitmq: memoryHighWatermark.type
Invalid Memory high watermark type. Valid values are "absolute" and
"relative". Please set a valid mode (--set memoryHighWatermark.type="xxxx")
{{- else if and .Values.memoryHighWatermark.enabled (not .Values.resources.limits.memory) (eq .Values.memoryHighWatermark.type "relative") }}
rabbitmq: memoryHighWatermark
You enabled configuring memory high watermark using a relative limit. However,
no memory limits were defined at POD level. Define your POD limits as shown below:
$ helm install {{ .Release.Name }} bitnami/rabbitmq \
--set memoryHighWatermark.enabled=true \
--set memoryHighWatermark.type="relative" \
--set memoryHighWatermark.value="0.4" \
--set resources.limits.memory="2Gi"
Alternatively, use an absolute value for the memory high watermark:
$ helm install {{ .Release.Name }} bitnami/rabbitmq \
--set memoryHighWatermark.enabled=true \
--set memoryHighWatermark.type="absolute" \
--set memoryHighWatermark.value="512MB"
{{- end -}}
{{- end -}}
{{/*
Validate values of rabbitmq - TLS configuration for Ingress
*/}}
{{- define "rabbitmq.validateValues.ingress.tls" -}}
{{- if and .Values.ingress.enabled .Values.ingress.tls (not (include "rabbitmq.ingress.certManagerRequest" .Values.ingress.annotations)) (not .Values.ingress.selfSigned) (empty .Values.ingress.extraTls) }}
rabbitmq: ingress.tls
You enabled the TLS configuration for the default ingress hostname but
you did not enable any of the available mechanisms to create the TLS secret
to be used by the Ingress Controller.
Please use any of these alternatives:
- Use the `ingress.extraTls` and `ingress.secrets` parameters to provide your custom TLS certificates.
- Rely on cert-manager to create it by setting the corresponding annotations
- Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
{{- end -}}
{{- end -}}
{{/*
Validate values of RabbitMQ - Auth TLS enabled
*/}}
{{- define "rabbitmq.validateValues.auth.tls" -}}
{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.autoGenerated) (not .Values.auth.tls.existingSecret) (not .Values.auth.tls.caCertificate) (not .Values.auth.tls.serverCertificate) (not .Values.auth.tls.serverKey) }}
rabbitmq: auth.tls
You enabled TLS for RabbitMQ but you did not enable any of the available mechanisms to create the TLS secret.
Please use any of these alternatives:
- Provide an existing secret containing the TLS certificates using `auth.tls.existingSecret`
- Provide the plain text certificates using `auth.tls.caCertificate`, `auth.tls.serverCertificate` and `auth.tls.serverKey`.
- Enable auto-generated certificates using `auth.tls.autoGenerated`.
{{- end -}}
{{- end -}}
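Note: the "rabbitmq.toBytes" helper above simply multiplies the numeric part by the factor that matches the suffix. As a quick sanity check, the equivalent shell arithmetic (purely illustrative, not part of the chart) is:

    echo $(( 512 * 1024 * 1024 ))        # "512Mi" -> 536870912 bytes (base 2)
    echo $(( 2 * 1000 * 1000 * 1000 ))   # "2G"    -> 2000000000 bytes (base 10)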

View File

@@ -0,0 +1,18 @@
{{- if .Values.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "rabbitmq.fullname" . }}-config
namespace: {{ .Release.Namespace | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
data:
rabbitmq.conf: |-
{{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | nindent 4 }}
{{- if .Values.advancedConfiguration }}
advanced.config: |-
{{- include "common.tplvalues.render" (dict "value" .Values.advancedConfiguration "context" $) | nindent 4 }}
{{- end }}
{{- end }}
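Note: this ConfigMap renders whatever string is supplied in .Values.configuration into rabbitmq.conf. One hedged way to override it at deploy time is Helm's --set-file flag; the chart path ./rabbitmq and the file name rabbitmq-custom.conf below are placeholders, not files from this repository:

    helm upgrade --install rabbitmq ./rabbitmq \
      --namespace default \
      --set-file configuration=rabbitmq-custom.conf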

View File

@@ -0,0 +1,4 @@
{{- range .Values.extraDeploy }}
---
{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
{{- end }}

View File

@@ -0,0 +1,59 @@
{{- if .Values.enabled }}
{{- if .Values.ingress.enabled }}
apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ include "rabbitmq.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
annotations:
{{- if .Values.ingress.certManager }}
kubernetes.io/tls-acme: "true"
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.ingress.annotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingress.annotations "context" $) | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }}
ingressClassName: {{ .Values.ingress.ingressClassName | quote }}
{{- end }}
rules:
{{- if .Values.ingress.domain }}
- host: {{ include "common.tplvalues.render" ( dict "value" .Values.ingress.hostname "context" $ ) }}
http:
paths:
{{- if .Values.ingress.extraPaths }}
{{- toYaml .Values.ingress.extraPaths | nindent 10 }}
{{- end }}
- path: {{ .Values.ingress.path }}
{{- if eq "true" (include "common.ingress.supportsPathType" .) }}
pathType: {{ .Values.ingress.pathType }}
{{- end }}
backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) "servicePort" .Values.service.managerPortName "context" $) | nindent 14 }}
{{- end }}
{{- range .Values.ingress.extraHosts }}
- host: {{ include "common.tplvalues.render" ( dict "value" .name "context" $ ) }}
http:
paths:
- path: {{ default "/" .path }}
{{- if eq "true" (include "common.ingress.supportsPathType" $) }}
pathType: {{ default "ImplementationSpecific" .pathType }}
{{- end }}
backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http-stats" "context" $) | nindent 14 }}
{{- end }}
{{- if or (and .Values.ingress.tls (or (include "rabbitmq.ingress.certManagerRequest" .Values.ingress.annotations) .Values.ingress.selfSigned)) .Values.ingress.extraTls }}
tls:
{{- if and .Values.ingress.tls (or (include "rabbitmq.ingress.certManagerRequest" .Values.ingress.annotations) .Values.ingress.selfSigned) }}
- hosts:
- {{ .Values.ingress.domain | quote }}
secretName: {{ printf "%s-tls" .Values.ingress.domain }}
{{- end }}
{{- if .Values.ingress.extraTls }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
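Note: this ingress template gates both the host rule and the TLS secret name on .Values.ingress.domain while the host itself is rendered from .Values.ingress.hostname, so the two values should be set together. An illustrative enablement (hostname/domain are placeholders, and the chart path is an assumption):

    helm upgrade --install rabbitmq ./rabbitmq \
      --set ingress.enabled=true \
      --set ingress.hostname=rabbitmq.example.com \
      --set ingress.domain=rabbitmq.example.com \
      --set ingress.tls=true \
      --set ingress.selfSigned=true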

View File

@@ -0,0 +1,39 @@
{{- if .Values.enabled }}
{{- if .Values.networkPolicy.enabled }}
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: {{ include "rabbitmq.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
podSelector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
ingress:
# Allow inbound connections
- ports:
- port: 4369 # EPMD
- port: {{ .Values.service.port }}
- port: {{ .Values.service.tlsPort }}
- port: {{ .Values.service.distPort }}
- port: {{ .Values.service.managerPort }}
{{- if not .Values.networkPolicy.allowExternal }}
from:
- podSelector:
matchLabels:
{{ template "rabbitmq.fullname" . }}-client: "true"
- podSelector:
matchLabels:
{{- include "common.labels.matchLabels" . | nindent 14 }}
{{- if .Values.networkPolicy.additionalRules }}
{{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.additionalRules "context" $) | nindent 8 }}
{{- end }}
{{- end }}
# Allow prometheus scrapes
- ports:
- port: {{ .Values.service.metricsPort }}
{{- end }}
{{- end }}
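Note: with networkPolicy.allowExternal set to false, only pods carrying the "<fullname>-client: true" label (or the chart's own pods) may reach the broker ports. A sketch of labelling such a client pod, assuming the release fullname resolves to "rabbitmq" and the client pod is "my-app" in the "default" namespace:

    kubectl label pod my-app --namespace default rabbitmq-client=true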

View File

@@ -0,0 +1,22 @@
{{- if .Values.enabled }}
{{- if .Values.pdb.create }}
apiVersion: {{ include "common.capabilities.policy.apiVersion" . }}
kind: PodDisruptionBudget
metadata:
name: {{ include "rabbitmq.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
{{- if .Values.pdb.minAvailable }}
minAvailable: {{ .Values.pdb.minAvailable }}
{{- end }}
{{- if .Values.pdb.maxUnavailable }}
maxUnavailable: {{ .Values.pdb.maxUnavailable }}
{{- end }}
selector:
matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }}
{{- end }}
{{- end }}
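Note: an illustrative override to render the budget with at least one pod always available (example values, not chart defaults; chart path is a placeholder):

    helm upgrade --install rabbitmq ./rabbitmq \
      --set pdb.create=true \
      --set pdb.minAvailable=1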

View File

@@ -0,0 +1,26 @@
{{- if .Values.enabled }}
{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ include "rabbitmq.fullname" . }}
{{- if .Values.metrics.prometheusRule.namespace }}
namespace: {{ .Values.metrics.prometheusRule.namespace }}
{{- else }}
namespace: {{ .Release.Namespace | quote }}
{{- end }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.metrics.prometheusRule.additionalLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
groups:
{{- with .Values.metrics.prometheusRule.rules }}
- name: {{ template "rabbitmq.name" $ }}
rules: {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,21 @@
{{- if .Values.enabled }}
{{- if .Values.rbac.create }}
kind: Role
apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
metadata:
name: {{ template "rabbitmq.fullname" . }}-endpoint-reader
namespace: {{ .Release.Namespace | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
{{- end }}
{{- end }}

View File

@@ -0,0 +1,20 @@
{{- if .Values.enabled }}
{{- if and .Values.serviceAccount.create .Values.rbac.create }}
kind: RoleBinding
apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
metadata:
name: {{ template "rabbitmq.fullname" . }}-endpoint-reader
namespace: {{ .Release.Namespace | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
subjects:
- kind: ServiceAccount
name: {{ template "rabbitmq.serviceAccountName" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "rabbitmq.fullname" . }}-endpoint-reader
{{- end }}
{{- end }}

View File

@@ -0,0 +1,48 @@
{{- if .Values.enabled }}
{{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "rabbitmq.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
type: Opaque
data:
{{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) }}
{{- if .Values.auth.password }}
rabbitmq-password: {{ .Values.auth.password | b64enc | quote }}
{{- else }}
rabbitmq-password: {{ randAlphaNum 10 | b64enc | quote }}
{{- end }}
{{- end }}
{{- if not .Values.auth.existingErlangSecret }}
{{- if .Values.auth.erlangCookie }}
rabbitmq-erlang-cookie: {{ .Values.auth.erlangCookie | b64enc | quote }}
{{- else }}
rabbitmq-erlang-cookie: {{ randAlphaNum 32 | b64enc | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- $extraSecretsPrependReleaseName := .Values.extraSecretsPrependReleaseName }}
{{- range $key, $value := .Values.extraSecrets }}
---
apiVersion: v1
kind: Secret
metadata:
{{- if $extraSecretsPrependReleaseName }}
name: {{ $.Release.Name }}-{{ $key }}
{{- else }}
name: {{ $key }}
{{- end }}
namespace: {{ $.Release.Namespace | quote }}
labels: {{- include "common.labels.standard" $ | nindent 4 }}
{{- if $.Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
type: Opaque
stringData: {{- include "common.tplvalues.render" (dict "value" $value "context" $) | nindent 2 }}
{{- end }}
{{- end }}
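Note: when no existing secrets are referenced, the password and Erlang cookie generated above are stored under the "rabbitmq-password" and "rabbitmq-erlang-cookie" keys and can be read back later, for example before a helm upgrade; release name "rabbitmq" and namespace "default" are assumptions:

    kubectl get secret --namespace default rabbitmq \
      -o jsonpath="{.data.rabbitmq-password}" | base64 --decode; echo
    kubectl get secret --namespace default rabbitmq \
      -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 --decode; echo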

View File

@@ -0,0 +1,17 @@
{{- if .Values.enabled }}
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "rabbitmq.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
secrets:
- name: {{ include "rabbitmq.fullname" . }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,56 @@
{{- if .Values.enabled }}
{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "rabbitmq.fullname" . }}
{{- if .Values.metrics.serviceMonitor.namespace }}
namespace: {{ .Values.metrics.serviceMonitor.namespace }}
{{- else }}
namespace: {{ .Release.Namespace | quote }}
{{- end }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.metrics.serviceMonitor.additionalLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
endpoints:
- port: metrics
{{- if .Values.metrics.serviceMonitor.interval }}
interval: {{ .Values.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.honorLabels }}
honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.relabelings }}
relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabelings | nindent 6 }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.relabellings }}
metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }}
{{- else if .Values.metrics.serviceMonitor.metricRelabelings }}
metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.path }}
path: {{ .Values.metrics.serviceMonitor.path }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace | quote }}
{{- with .Values.metrics.serviceMonitor.podTargetLabels }}
podTargetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.metrics.serviceMonitor.targetLabels }}
targetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
{{- end }}
{{- end }}
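Note: the ServiceMonitor is rendered only when both metrics.enabled and metrics.serviceMonitor.enabled are true, and it requires the Prometheus Operator CRDs to exist in the cluster. A hedged enablement plus a quick CRD check (chart path is a placeholder):

    kubectl get crd servicemonitors.monitoring.coreos.com
    helm upgrade --install rabbitmq ./rabbitmq \
      --set metrics.enabled=true \
      --set metrics.serviceMonitor.enabled=true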

Some files were not shown because too many files have changed in this diff.