add
This commit is contained in:
25 3-湘潭钢铁项目/1-base-env-shell/0.0-dependencies.sh Normal file
@@ -0,0 +1,25 @@
#!/usr/bin/env bash

# Run this on every node (set each node's own hostname; storage-1 is this node's).
hostnamectl set-hostname storage-1

# sed -i "/search/ a nameserver 223.5.5.5" /etc/resolv.conf

echo "AllowTcpForwarding yes" >> /etc/ssh/sshd_config
systemctl restart sshd

cat >> /etc/hosts << EOF
172.32.12.34 master-node
172.32.12.35 worker-1
172.32.12.36 worker-2
172.32.12.37 worker-3
172.32.12.38 storage-1
EOF

bash <(curl -L -s https://cdn.jsdelivr.net/gh/teddysun/across/bbr.sh)
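Since the script above has to be repeated on every node, a small fan-out wrapper keeps that mechanical. A minimal sketch, assuming root SSH access to the addresses from the /etc/hosts block (the hostnamectl line still needs a per-node hostname):

```bash
#!/usr/bin/env bash
# Copy 0.0-dependencies.sh to every node and run it there.
NODES=(172.32.12.34 172.32.12.35 172.32.12.36 172.32.12.37 172.32.12.38)
for ip in "${NODES[@]}"; do
  scp ./0.0-dependencies.sh "root@${ip}:/tmp/" \
    && ssh "root@${ip}" "bash /tmp/0.0-dependencies.sh"
done
```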
74 3-湘潭钢铁项目/1-base-env-shell/0.1-mountNodeVolume.sh Normal file
@@ -0,0 +1,74 @@
#!/bin/bash

## Disable swap
#swapoff -a
#cp -f /etc/fstab /etc/fstab_bak
#cat /etc/fstab_bak | grep -v swap >/etc/fstab

# echo "-----------------------------------------------------------------------"
# RootVolumeSizeBefore=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "root volume size before the resize: ${RootVolumeSizeBefore}"

# echo "y


# " | lvremove /dev/mapper/centos-swap

# freepesize=$(vgdisplay centos | grep 'Free PE' | awk '{print $5}')

# lvextend -l+${freepesize} /dev/mapper/centos-root

# ## Automatically grow the XFS filesystem to the maximum available size
# xfs_growfs /dev/mapper/centos-root

# df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}'

# echo "-----------------------------------------------------------------------"
# RootVolumeSizeAfter=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "root volume size after the resize: ${RootVolumeSizeAfter}"
# RootVolumeSizeBeforeNum=$(echo $RootVolumeSizeBefore | cut -d "G" -f1)
# RootVolumeSizeAfterNum=$(echo $RootVolumeSizeAfter | cut -d "G" -f1)

# echo "Congratulations, your root volume grew by +++++++$(( ${RootVolumeSizeAfterNum}-${RootVolumeSizeBeforeNum} ))GB+++++"

echo ""
echo ""
echo ""
echo "-----------------------------------------------------------------------"

export VG_NAME=minio-vg
export LV_NAME=4
# fdisk answers: n(ew), p(rimary), accept the defaults, t(ype) -> 8e (Linux LVM), w(rite)
echo "n
p



t

8e
w
" | fdisk /dev/vdf
partprobe

#vgcreate ${VG_NAME} /dev/vdd1
vgextend ${VG_NAME} /dev/vdf1
export selfpesize=319999 # physical extents; at the default 4 MiB PE size this is roughly 1.22 TiB
lvcreate -l ${selfpesize} -n ${LV_NAME} ${VG_NAME}
mkfs.xfs /dev/${VG_NAME}/${LV_NAME}

export selffstab="/dev/${VG_NAME}/${LV_NAME} /data/minio-pv/pv4 xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mkdir -p /data/minio-pv/pv4 # the mount point must exist before mount -a
mount -a

echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"

# Grow the root volume; find the filesystem to grow (${VG_NAME}-root) via df -Th
# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
# xfs_growfs /dev/mapper/${VG_NAME}-root
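Hardcoding selfpesize=319999 ties the script to one disk size; the commented-out root-resize block above already shows the more portable pattern of reading the free extent count from the VG. A minimal sketch of the same idea applied here:

```bash
# Size the LV to whatever is free in the VG instead of hardcoding a PE count.
VG_NAME=minio-vg
LV_NAME=4
freepesize=$(vgdisplay "${VG_NAME}" | grep 'Free PE' | awk '{print $5}')
lvcreate -l "${freepesize}" -n "${LV_NAME}" "${VG_NAME}"
# or, without counting PEs at all:
# lvcreate -l 100%FREE -n "${LV_NAME}" "${VG_NAME}"
```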
2031 3-湘潭钢铁项目/1-base-env-shell/1-nodeImportant.sh Normal file
File diff suppressed because it is too large
8 3-湘潭钢铁项目/1-base-env-shell/1.java Normal file
@@ -0,0 +1,8 @@
class Solution {

    public static void main(String[] args) {

    }

}
144 3-湘潭钢铁项目/1-base-env-shell/2-imageDownSync.sh Normal file
@@ -0,0 +1,144 @@
#!/usr/bin/env bash

### Edit the settings below ###
### Edit the settings below ###
### Edit the settings below ###

cmlc_app_image_list="cmlc-app-images.txt" # update the version as needed
rancher_image_list="kubernetes-1.20.4.txt" # usually no change needed
middleware_image_list="middleware-images.txt" # usually no change needed
DockerRegisterDomain="192.168.8.65:8033" # change to match your environment
HarborAdminPass=V2ryStr@ngPss # must match the password used in the first script

### Edit the settings above ###
### Edit the settings above ###
### Edit the settings above ###

downloadAllNeededImages() {
    while [[ $# -gt 0 ]]; do
        pulled=""
        while IFS= read -r i; do
            [ -z "${i}" ] && continue
            echo "Downloading: ${i}"
            if docker pull "${i}" >/dev/null 2>&1; then
                echo "Image pull success: ${i}"
                pulled="${pulled} ${i}"
            else
                if docker inspect "${i}" >/dev/null 2>&1; then
                    pulled="${pulled} ${i}"
                else
                    echo "Image pull failed: ${i}"
                fi
            fi
            echo "-------------------------------------------------"
        done <"${1}"
        shift
    done
}

downloadAllNeededImagesAndCompress() {
    while [[ $# -gt 0 ]]; do
        pulled=""
        while IFS= read -r i; do
            [ -z "${i}" ] && continue
            echo "Downloading: ${i}"
            if docker pull "${i}" >/dev/null 2>&1; then
                echo "Image pull success: ${i}"
                pulled="${pulled} ${i}"
            else
                if docker inspect "${i}" >/dev/null 2>&1; then
                    pulled="${pulled} ${i}"
                else
                    echo "Image pull failed: ${i}"
                fi
            fi
            echo "-------------------------------------------------"
        done <"${1}"
        compressPacName="${1%.txt}.tar.gz" # strip only the .txt suffix, keep the version

        echo "Creating ${compressPacName} with $(echo ${pulled} | wc -w | tr -d '[:space:]') images"
        docker save $(echo ${pulled}) | gzip --stdout > ${compressPacName}

        shift
    done
}

pushRKEImageToHarbor(){
    linux_images=()
    while IFS= read -r i; do
        [ -z "${i}" ] && continue
        linux_images+=("${i}");
    done < "${rancher_image_list}"

    docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}

    for i in "${linux_images[@]}"; do
        [ -z "${i}" ] && continue
        case $i in
        */*)
            image_name="${DockerRegisterDomain}/${i}"
            ;;
        *)
            image_name="${DockerRegisterDomain}/rancher/${i}"
            ;;
        esac

        echo "Pushing image to the private registry: ${image_name}"
        docker tag "${i}" "${image_name}"
        docker push "${image_name}"
        echo "-------------------------------------------------"
    done
}

pushCMLCAPPImageToHarbor(){
    app_images=()
    while IFS= read -r i; do
        [ -z "${i}" ] && continue
        app_images+=("${i}");
    done < "${cmlc_app_image_list}"

    docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
    for app in "${app_images[@]}"; do
        [ -z "${app}" ] && continue
        image_name="${DockerRegisterDomain}/$(echo ${app} | cut -d"/" -f2-8)"
        echo "Pushing image to the private registry: ${image_name}"
        docker tag "${app}" "${image_name}"
        docker push "${image_name}"
        echo "-------------------------------------------------"
    done
}

pushMiddlewareImageToHarbor(){
    middleware_image=()
    while IFS= read -r i; do
        [ -z "${i}" ] && continue
        middleware_image+=("${i}");
    done < "${middleware_image_list}"

    docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
    for app in "${middleware_image[@]}"; do
        [ -z "${app}" ] && continue
        case ${app} in
        */*/*)
            image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f3-8)"
            ;;
        */*)
            image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f2-8)"
            ;;
        esac

        echo "Pushing image to the private registry: ${image_name}"
        docker tag "${app}" "${image_name}"
        docker push "${image_name}"
        echo "-------------------------------------------------"
    done
}


#downloadAllNeededImagesAndCompress "kubernetes-1.20.4.txt"
downloadAllNeededImages "${middleware_image_list}"
#
#pushRKEImageToHarbor
#pushMiddlewareImageToHarbor
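On the air-gapped side, the tarball produced by downloadAllNeededImagesAndCompress has to be imported before the push functions can retag anything. A minimal sketch (docker load reads gzipped archives directly; the name follows the list file, e.g. kubernetes-1.20.4.tar.gz):

```bash
# Import the saved image bundle on an offline host, then verify.
docker load -i kubernetes-1.20.4.tar.gz
docker images | head
```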
235 3-湘潭钢铁项目/1-base-env-shell/3-bootUPk8s.sh Normal file
@@ -0,0 +1,235 @@
#!/usr/bin/env bash

### Edit the settings below ###
### Edit the settings below ###
### Edit the settings below ###

# In principle, the server that can reach the public internet hosts Harbor.
# If every host can reach the public internet, list all hosts except the Harbor server.
PrivateServerIPs=(192.168.8.66 192.168.8.67 192.168.8.68) # IPs of the intranet-only servers, excluding any server with public internet access

### Edit the settings above ###
### Edit the settings above ###
### Edit the settings above ###

RED="31m" ## red
GREEN="32m" ## green
YELLOW="33m" ## yellow
PURPLE="35m" ## purple
BLUE="36m" ## blue

colorEcho() {
    # shellcheck disable=SC2145
    echo -e "\033[${1}${@:2}\033[0m" 1>&2
}

check_root() {
    if [[ $EUID != 0 ]]; then
        colorEcho ${RED} "Not running as root (or without root privileges); cannot continue. Please switch to the root account!"
        colorEcho ${YELLOW} "Use the sudo - command to gain temporary root privileges (you may be prompted for the root password)."
        exit 1
    fi
}

startFunc(){
    colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
    colorEcho ${BLUE} "Starting the RKE cluster bring-up ..."
    echo ""

    colorEcho ${BLUE} "This script has a series of prerequisites; make sure every item below is done!!!!"
    colorEcho ${YELLOW} "----------------------------------------------------------"
    colorEcho ${RED} "1. Base environment initialized and all RKE system images uploaded to the private Harbor!"
    colorEcho ${RED} "2. The RKE cluster template configured, edited, and named cluster.yml!!"
    colorEcho ${RED} "3. ..."
    colorEcho ${YELLOW} "----------------------------------------------------------"

    while true; do
        colorEcho ${RED} "Make sure all of the items above are complete!!"
        read -r -p "Type yes to confirm so the script can continue!! " input
        case $input in
        yes)
            colorEcho ${GREEN} "You have confirmed that all of the items above are complete!!"
            colorEcho ${GREEN} "----------------------------------------------------------"
            echo ""
            colorEcho ${BLUE} "Starting the RKE cluster boot process!!!"
            echo ""
            main
            break
            ;;
        *)
            echo ""
            colorEcho ${RED} "Invalid input!!! Type >> yes << to confirm"
            colorEcho ${RED} "-----------------------------------------------------"
            echo ""
            ;;
        esac
    done
}

installRKE(){
    colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
    colorEcho ${BLUE} "Downloading and installing the RKE tool ..."
    echo ""

    colorEcho ${BLUE} "Downloading rke from the Rancher mirror ..."
    wget http://rancher-mirror.cnrancher.com/rke/v1.2.6/rke_linux-amd64
    if [ -s rke_linux-amd64 ]; then
        colorEcho ${GREEN} "rke downloaded!"
        chmod +x rke_linux-amd64
        mv ./rke_linux-amd64 /usr/local/bin/rke
        colorEcho ${GREEN} "----------------------------------------------------------"
        rke --version
        colorEcho ${GREEN} "----------------------------------------------------------"
        rke config --list-version --all
        echo ""
        colorEcho ${BLUE} "Downloading kubectl from the Rancher mirror ..."
        wget http://rancher-mirror.cnrancher.com/kubectl/v1.20.4/linux-amd64-v1.20.4-kubectl
        chmod +x linux-amd64-v1.20.4-kubectl
        mv linux-amd64-v1.20.4-kubectl /usr/local/bin/kubectl
        colorEcho ${GREEN} "----------------------------------------------------------"
        kubectl version
        colorEcho ${GREEN} "----------------------------------------------------------"
    else
        colorEcho ${RED} "rke download failed!!! The script cannot continue; please download rke manually!"
        colorEcho ${RED} "rke download failed!!! The script cannot continue; please download rke manually!"
        colorEcho ${RED} "rke download failed!!! The script cannot continue; please download rke manually!"
        return 1
    fi
}

createRKEInstallerUser(){
    colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
    colorEcho ${BLUE} "Creating the rke-installer user ..."
    echo ""
    useradd rke-installer
    echo "rke-installer
rke-installer
" | passwd rke-installer
    # add the rke-installer login user to the docker group
    gpasswd -a rke-installer docker
    # refresh group membership
    newgrp docker
    echo ""
    if [ -d /home/rke-installer ]; then
        colorEcho ${GREEN} "rke-installer user created successfully!! "
        echo ""
    else
        colorEcho ${YELLOW} "Detected that the rke-installer user already exists"
    fi

    if [[ -s cluster.yaml || -s cluster.yml ]]; then
        colorEcho ${BLUE} "Copying the cluster.yaml file into the rke-installer home directory ..."
        mv cluster.y* /home/rke-installer/cluster.yml
        if [ -s /home/rke-installer/cluster.yml ]; then
            colorEcho ${BLUE} "cluster.yml is in place!"
            chown rke-installer:rke-installer /home/rke-installer/cluster.yml
        else
            colorEcho ${RED} "No RKE cluster template found in the current directory!!"
            colorEcho ${RED} "Cannot continue; exiting!!"
            return 1
        fi
    else
        colorEcho ${RED} "No RKE cluster template found in the current directory!!"
        colorEcho ${RED} "Cannot continue; exiting!!"
        echo ""
        colorEcho ${YELLOW} "--------------------------------------------------"
        colorEcho ${RED} "Please create the RKE cluster template and name it cluster.yml "
        colorEcho ${RED} "Please create the RKE cluster template and name it cluster.yml "
        colorEcho ${RED} "Please create the RKE cluster template and name it cluster.yml "
        colorEcho ${YELLOW} "--------------------------------------------------"
        return 1
    fi

    colorEcho ${BLUE} "Switching the current user to rke-installer "
    su rke-installer # note: su opens an interactive shell; the lines below run only after it exits
    echo ""
    colorEcho ${BLUE} "Check whether the rke-installer user can run docker ps!!"
    docker ps
    colorEcho ${BLUE} "----------------------------------------------------------"
}

generateRKEUserKey(){
    colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
    colorEcho ${BLUE} "Generating the SSH key for the rke-installer user ..."
    echo ""
    su rke-installer
    ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
    cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
    chmod 600 ~/.ssh/authorized_keys
    colorEcho ${GREEN} "--------------------------------------------------------------"
    colorEcho ${GREEN} "----------- Local host configured! -------------"
    echo ""

    for ip in "${PrivateServerIPs[@]}"; do
        colorEcho ${BLUE} "Manually run the following commands as root on host ${ip}"
        colorEcho ${BLUE} "Manually run the following commands as root on host ${ip}"
        colorEcho ${BLUE} "Manually run the following commands as root on host ${ip}"
        colorEcho ${BLUE} "-----------------------------------------------"
        echo ""
        echo ""
        colorEcho ${RED} " Run them as root!!! "
        colorEcho ${RED} " Run them as root!!! "
        colorEcho ${RED} " Run them as root!!! "
        echo ""
        echo "useradd rke-installer && echo \"rke-installer
rke-installer
\" | passwd rke-installer && gpasswd -a rke-installer docker && newgrp docker && su rke-installer && docker ps "
        echo ""
        echo "clear && ssh-keygen -t rsa -P \"\" -f ~/.ssh/id_rsa && echo \"$(cat ~/.ssh/id_rsa.pub)\" >> ~/.ssh/authorized_keys && echo \"\" && cat ~/.ssh/authorized_keys"
        echo ""
        echo ""
        while true; do
            colorEcho ${RED} "Make sure you have run the commands above on host ${ip}!!"
            read -r -p "Type yes to confirm so the script can continue!! " input
            case $input in
            yes)
                colorEcho ${GREEN} "You have confirmed that the SSH key was added on host ${ip}!"
                echo ""
                break
                ;;
            *)
                echo ""
                colorEcho ${RED} "Invalid input!!! Type >> yes << to confirm"
                colorEcho ${RED} "Please run the commands above on host ${ip}!!!"
                colorEcho ${RED} "Otherwise this script will not work!!"
                colorEcho ${RED} "-----------------------------------------------------"
                echo ""
                ;;
            esac
        done
    done
}

startRKECLuster(){
    colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
    colorEcho ${BLUE} "Starting the RKE cluster !!!"
    colorEcho ${BLUE} "Starting the RKE cluster !!!"
    colorEcho ${BLUE} "Starting the RKE cluster !!!"
    echo ""
    if [[ $(pwd) == "/home/rke-installer" ]]; then
        colorEcho ${BLUE} "Current directory is /home/rke-installer"
        echo ""
        colorEcho ${BLUE} "Starting the RKE cluster boot process !!!"
        colorEcho ${BLUE} "-------------------------------------------------------------"
        for i in {3..1}; do
            colorEcho ${BLUE} "Countdown ->> $i s <<- before RKE starts; the log output above will be cleared!!"
            sleep 1
        done
        clear
        rke up
    else
        colorEcho ${BLUE} "Current directory is not /home/rke-installer; changing directory!"
        cd /home/rke-installer
        startRKECLuster
    fi
}

main(){
    check_root

    generateRKEUserKey || return $?
    startRKECLuster || return $?
}

startFunc
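The manual copy-and-confirm loop in generateRKEUserKey can usually be shortened with ssh-copy-id once the rke-installer user exists on every private node. A minimal sketch, assuming password authentication is still enabled for that user:

```bash
# Distribute the local rke-installer public key to each private node.
PrivateServerIPs=(192.168.8.66 192.168.8.67 192.168.8.68)
for ip in "${PrivateServerIPs[@]}"; do
  ssh-copy-id -i ~/.ssh/id_rsa.pub "rke-installer@${ip}"
done
```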
40 3-湘潭钢铁项目/1-base-env-shell/cmlc-app-images.txt Normal file
@@ -0,0 +1,40 @@
harbor.cdcyy.com.cn/cmii/cmii-uav-platform:3.1.0-nicp
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:3.1.0-nicp
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-device:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-cms:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-notice:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-developer:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-mission:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-open-gateway:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-admin-user:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-admin-data:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-logger:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-user:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-brain:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-uav-process:3.1.0
harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:v1.0.0-no-retention
harbor.cdcyy.com.cn/cmii/cmii-srs-operator:v1.0.0
harbor.cdcyy.com.cn/cmii/cmii-srs-operator:v1.2.0
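pushCMLCAPPImageToHarbor in 2-imageDownSync.sh retags each of these names by dropping the source registry host and prefixing the configured DockerRegisterDomain; the mapping can be previewed with the same cut expression:

```bash
# harbor.cdcyy.com.cn/cmii/cmii-uav-platform:3.1.0-nicp
# -> 192.168.8.65:8033/cmii/cmii-uav-platform:3.1.0-nicp
echo "harbor.cdcyy.com.cn/cmii/cmii-uav-platform:3.1.0-nicp" | cut -d"/" -f2-8
# prints: cmii/cmii-uav-platform:3.1.0-nicp
```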
20 3-湘潭钢铁项目/1-base-env-shell/kubernetes-admin-user-rbac.yaml Normal file
@@ -0,0 +1,20 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: admin-user
    namespace: kube-system
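This admin-user ServiceAccount is bound to cluster-admin, and its bearer token is what the dashboard below accepts at login. A minimal sketch for reading it (the secret name is auto-generated on Kubernetes 1.20, hence the grep):

```bash
# Print the admin-user token for dashboard login.
kubectl -n kube-system describe secret \
  "$(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')"
```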
299 3-湘潭钢铁项目/1-base-env-shell/kubernetes-dashboad.yaml Normal file
@@ -0,0 +1,299 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kube-system

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kube-system
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kube-system
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kube-system

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: 172.32.12.34:8033/kubernetesui/dashboard:v2.0.1
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kube-system
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kube-system
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: 172.32.12.34:8033/kubernetesui/metrics-scraper:v1.0.4
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
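The dashboard Service is type NodePort without a pinned nodePort, so the assigned port has to be looked up after applying the manifests. A minimal sketch:

```bash
kubectl apply -f kubernetes-admin-user-rbac.yaml -f kubernetes-dashboad.yaml
# The PORT(S) column shows the assigned HTTPS NodePort, e.g. 443:3xxxx/TCP.
kubectl -n kube-system get svc kubernetes-dashboard
```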
64 3-湘潭钢铁项目/1-base-env-shell/kubernetes-images-2.5.7-1.18.6.txt Normal file
@@ -0,0 +1,64 @@
busybox
rancher/backup-restore-operator:v1.0.3
rancher/calico-cni:v3.13.4
rancher/calico-ctl:v3.13.4
rancher/calico-kube-controllers:v3.13.4
rancher/calico-node:v3.13.4
rancher/calico-pod2daemon-flexvol:v3.13.4
rancher/cis-operator:v1.0.3
rancher/cluster-proportional-autoscaler:1.7.1
rancher/configmap-reload:v0.3.0-rancher4
rancher/coredns-coredns:1.6.9
rancher/coreos-etcd:v3.4.3-rancher1
rancher/coreos-flannel:v0.12.0
rancher/coreos-flannel:v0.13.0-rancher1
rancher/coreos-kube-state-metrics:v1.9.7
rancher/coreos-prometheus-config-reloader:v0.39.0
rancher/coreos-prometheus-operator:v0.39.0
rancher/externalip-webhook:v0.1.6
rancher/flannel-cni:v0.3.0-rancher6
rancher/fleet-agent:v0.3.4
rancher/fleet:v0.3.4
rancher/fluentd:v0.1.24
rancher/grafana-grafana:7.1.5
rancher/hyperkube:v1.18.16-rancher1
rancher/jimmidyson-configmap-reload:v0.3.0
rancher/k8s-dns-dnsmasq-nanny:1.15.2
rancher/k8s-dns-kube-dns:1.15.2
rancher/k8s-dns-node-cache:1.15.7
rancher/k8s-dns-sidecar:1.15.2
rancher/klipper-lb:v0.1.2
rancher/kube-api-auth:v0.1.4
rancher/kubectl:v1.18.6
rancher/kubernetes-external-dns:v0.7.3
rancher/library-busybox:1.31.1
rancher/library-busybox:1.32.1
rancher/library-nginx:1.19.2-alpine
rancher/library-traefik:1.7.19
rancher/local-path-provisioner:v0.0.11
rancher/local-path-provisioner:v0.0.14
rancher/local-path-provisioner:v0.0.19
rancher/log-aggregator:v0.1.7
rancher/istio-kubectl:1.5.10
rancher/metrics-server:v0.3.6
rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
rancher/nginx-ingress-controller:nginx-0.35.0-rancher2
rancher/opa-gatekeeper:v3.1.0-beta.7
rancher/openzipkin-zipkin:2.14.2
rancher/pause:3.1
rancher/plugins-docker:18.09
rancher/prom-alertmanager:v0.21.0
rancher/prom-node-exporter:v1.0.1
rancher/prom-prometheus:v2.12.0
rancher/prom-prometheus:v2.18.2
rancher/prometheus-auth:v0.2.1
rancher/rancher-agent:v2.5.7
rancher/rancher-webhook:v0.1.0-beta9
rancher/rancher:v2.5.7
rancher/rke-tools:v0.1.72
rancher/security-scan:v0.1.14
rancher/security-scan:v0.2.2
rancher/shell:v0.1.6
rancher/sonobuoy-sonobuoy:v0.16.3
rancher/system-upgrade-controller:v0.6.2
61 3-湘潭钢铁项目/1-base-env-shell/kubernetes-images-2.5.7-1.20.4.txt Normal file
@@ -0,0 +1,61 @@
busybox
rancher/backup-restore-operator:v1.0.3
rancher/calico-cni:v3.17.2
rancher/calico-ctl:v3.17.2
rancher/calico-kube-controllers:v3.17.2
rancher/calico-node:v3.17.2
rancher/calico-pod2daemon-flexvol:v3.17.2
rancher/cis-operator:v1.0.3
rancher/cluster-proportional-autoscaler:1.7.1
rancher/coredns-coredns:1.8.0
rancher/coreos-etcd:v3.4.14-rancher1
rancher/coreos-kube-state-metrics:v1.9.7
rancher/coreos-prometheus-config-reloader:v0.39.0
rancher/coreos-prometheus-operator:v0.39.0
rancher/externalip-webhook:v0.1.6
rancher/flannel-cni:v0.3.0-rancher6
rancher/fleet-agent:v0.3.4
rancher/fleet:v0.3.4
rancher/fluentd:v0.1.24
rancher/grafana-grafana:7.1.5
rancher/hyperkube:v1.20.4-rancher1
rancher/jimmidyson-configmap-reload:v0.3.0
rancher/k8s-dns-dnsmasq-nanny:1.15.2
rancher/k8s-dns-kube-dns:1.15.2
rancher/k8s-dns-node-cache:1.15.13
rancher/k8s-dns-sidecar:1.15.2
rancher/klipper-lb:v0.1.2
rancher/kube-api-auth:v0.1.4
rancher/kubectl:v1.20.4
rancher/kubernetes-external-dns:v0.7.3
rancher/library-busybox:1.31.1
rancher/library-busybox:1.32.1
rancher/library-nginx:1.19.2-alpine
rancher/library-traefik:1.7.19
rancher/local-path-provisioner:v0.0.11
rancher/local-path-provisioner:v0.0.14
rancher/local-path-provisioner:v0.0.19
rancher/log-aggregator:v0.1.7
rancher/istio-kubectl:1.5.10
rancher/metrics-server:v0.4.1
rancher/configmap-reload:v0.3.0-rancher4
rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
rancher/nginx-ingress-controller:nginx-0.43.0-rancher1
rancher/opa-gatekeeper:v3.1.0-beta.7
rancher/openzipkin-zipkin:2.14.2
rancher/pause:3.2
rancher/plugins-docker:18.09
rancher/prom-alertmanager:v0.21.0
rancher/prom-node-exporter:v1.0.1
rancher/prom-prometheus:v2.18.2
rancher/prometheus-auth:v0.2.1
rancher/rancher-agent:v2.5.7
rancher/rancher-webhook:v0.1.0-beta9
rancher/rancher:v2.5.7
rancher/rke-tools:v0.1.72
rancher/security-scan:v0.1.14
rancher/security-scan:v0.2.2
rancher/shell:v0.1.6
rancher/sonobuoy-sonobuoy:v0.16.3
rancher/system-upgrade-controller:v0.6.2
22 3-湘潭钢铁项目/1-base-env-shell/middleware-images.txt Normal file
@@ -0,0 +1,22 @@
docker.io/bitnami/redis:6.2.6-debian-10-r0
docker.io/bitnami/mysql:8.0.26-debian-10-r0
docker.io/bitnami/bitnami-shell:10-debian-10-r140
docker.io/bitnami/rabbitmq:3.9.12-debian-10-r3
docker.io/bitnami/minio:2021.3.26-debian-10-r0
docker.io/ossrs/srs:v4.0.136
docker.io/emqx/emqx:4.2.12
docker.io/nacos/nacos-server:2.0.1
docker.io/mongo:5.0
docker.io/rabbitmq:3.7-management
docker.io/v2fly/v2fly-core:v4.38.3
docker.io/pollyduan/ingress-nginx-controller:v0.44.0
docker.io/jettech/kube-webhook-certgen:v1.5.1
docker.io/minio/minio:RELEASE.2022-03-26T06-49-28Z
docker.io/bitnami/minio:2022.5.4
docker.io/kubernetesui/dashboard:v2.0.1
docker.io/kubernetesui/metrics-scraper:v1.0.4
docker.io/ossrs/srs:v4.0-b9
docker.io/nginx:1.21.3
docker.io/minio/console:v0.15.6
docker.io/minio/operator:v4.4.13
240 3-湘潭钢铁项目/1-base-env-shell/rke-cluster.yml Normal file
@@ -0,0 +1,240 @@
nodes:
  - address: 172.32.12.34
    user: root
    role:
      - controlplane
      - etcd
      - worker
    internal_address: 172.32.12.34
  - address: 172.32.12.35
    user: root
    role:
      - worker
    labels:
      ingress-deploy: "true"
    internal_address: 172.32.12.35
  - address: 172.32.12.36
    user: root
    role:
      - worker
    internal_address: 172.32.12.36
  - address: 172.32.12.37
    user: root
    role:
      - worker
    internal_address: 172.32.12.37
    labels:
      mysql-deploy: "true"
  # - address: 192.168.8.68
  #   user: root
  #   role:
  #     - worker
  #   internal_address: 192.168.8.68
  #   labels:
  #     minio-deploy: true

authentication:
  strategy: x509
  sans:
    - "172.32.12.34"

private_registries:
  - url: 172.32.12.34:8033 # private registry address
    user: admin
    password: "V2ryStr@ngPss"
    is_default: true

##############################################################################

# Defaults to false; when set to true, RKE does not error out on unsupported Docker versions
ignore_docker_version: true

# Set the name of the Kubernetes cluster
cluster_name: rke-cluster

kubernetes_version: v1.20.4-rancher1-1

ssh_key_path: /root/.ssh/id_rsa

# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true


services:
  etcd:
    backup_config:
      enabled: false
      interval_hours: 72
      retention: 3
      safe_timestamp: false
      timeout: 300
    creation: 12h
    extra_args:
      election-timeout: 5000
      heartbeat-interval: 500
    gid: 0
    retention: 72h
    snapshot: false
    uid: 0

  kube-api:
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-controller
    service_cluster_ip_range: 10.74.0.0/16
    # Expose a different port range for NodePort services
    service_node_port_range: 30000-40000
    always_pull_images: true
    pod_security_policy: false
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Enable audit log to stdout
      audit-log-path: "-"
      # Increase number of delete workers
      delete-collection-workers: 3
      # Set the level of log output to warning-level
      v: 1
  kube-controller:
    # CIDR pool used to assign IP addresses to pods in the cluster
    cluster_cidr: 10.100.0.0/16
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-api
    service_cluster_ip_range: 10.74.0.0/16
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Set the level of log output to debug-level
      v: 1
      # Enable RotateKubeletServerCertificate feature gate
      feature-gates: RotateKubeletServerCertificate=true
      # Enable TLS Certificates management
      # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
      cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
      cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
  kubelet:
    # Base domain for the cluster
    cluster_domain: cluster.local
    # IP address for the DNS service endpoint
    cluster_dns_server: 10.74.0.10
    # Fail if swap is on
    fail_swap_on: false
    # Optionally define additional volume binds to a service
    extra_binds:
      - "/data/minio-pv:/hostStorage" # do not change; added for the MinIO PVs
    # Set max pods to 122 instead of the default 110
    extra_args:
      max-pods: 122
  scheduler:
    extra_args:
      # Set the level of log output to warning-level
      v: 0
  kubeproxy:
    extra_args:
      # Set the level of log output to warning-level
      v: 0

authorization:
  mode: rbac

addon_job_timeout: 30

# Specify network plugin-in (canal, calico, flannel, weave, or none)
network:
  mtu: 1440
  options:
    flannel_backend_type: vxlan
  plugin: calico
  tolerations:
    - key: "node.kubernetes.io/unreachable"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300
    - key: "node.kubernetes.io/not-ready"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300

# Specify DNS provider (coredns or kube-dns)
dns:
  provider: coredns
  nodelocal:
    ip_address: null
    node_selector: null
    update_strategy: {}
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 20%
      maxSurge: 15%
  linear_autoscaler_params:
    cores_per_replica: 0.34
    nodes_per_replica: 4
    prevent_single_point_failure: true
    min: 2
    max: 3
  tolerations:
    - key: "node.kubernetes.io/unreachable"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300
    - key: "node.kubernetes.io/not-ready"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300

# Specify monitoring provider (metrics-server)
monitoring:
  provider: metrics-server
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 8

ingress:
  provider: nginx
  default_backend: true
  http_port: 30500
  https_port: 31500
  extra_envs:
    - name: TZ
      value: Asia/Shanghai
  node_selector:
    ingress-deploy: "true"
  options:
    use-forwarded-headers: "true"
    access-log-path: /var/log/nginx/access.log
    client-body-timeout: '6000'
    compute-full-forwarded-for: 'true'
    enable-underscores-in-headers: 'true'
    log-format-escape-json: 'true'
    log-format-upstream: >-
      { "msec": "$msec", "connection": "$connection", "connection_requests":
      "$connection_requests", "pid": "$pid", "request_id": "$request_id",
      "request_length": "$request_length", "remote_addr": "$remote_addr",
      "remote_user": "$remote_user", "remote_port": "$remote_port",
      "http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
      "$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
      "request_uri": "$request_uri", "args": "$args", "status": "$status",
      "body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
      "http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
      "http_host": "$http_host", "server_name": "$server_name", "request_time":
      "$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
      "$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
      "upstream_response_time": "$upstream_response_time",
      "upstream_response_length": "$upstream_response_length",
      "upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
      "$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
      "request_method": "$request_method", "server_protocol": "$server_protocol",
      "pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
      "geoip_country_code": "$geoip_country_code" }
    proxy-body-size: 5120m
    proxy-read-timeout: '6000'
    proxy-send-timeout: '6000'
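After rke up succeeds against this template, RKE writes a kubeconfig next to cluster.yml; the first smoke test is just listing the nodes. A minimal sketch:

```bash
# rke up leaves kube_config_cluster.yml beside cluster.yml.
export KUBECONFIG=/home/rke-installer/kube_config_cluster.yml
kubectl get nodes -o wide
kubectl get pods -A
```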
18 3-湘潭钢铁项目/1-base-env-shell/说明.md Normal file
@@ -0,0 +1,18 @@
# Features that need offline installation

## Features to install

1. NFS content

1. harbor
1. images
    1. all of the rancher images
    2. the minio image
    3. the srs image

1. SuperCyy.123
1. cyy

1.
26 3-湘潭钢铁项目/2-helm-chart/.helmignore Normal file
@@ -0,0 +1,26 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
.vscode/
charts/frontend-app/templates/traefik.yaml
charts/all-middleware/charts/rabbitmq-backup
charts/minio-pv
charts/minio-storage
105 3-湘潭钢铁项目/2-helm-chart/1-pvc.yaml Normal file
@@ -0,0 +1,105 @@
---
# Source: outside-deploy/charts/all-persistence-volume-claims/templates/pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: glusterfs-backend-log-pvc
  namespace: xtgt
  labels:
    cmii.type: middleware-base
    cmii.app: glusterfs-backend-log-pvc
    helm.sh/chart: all-persistence-volume-claims-1.1.0
    app.kubernetes.io/version: 3.1.0
spec:
  storageClassName: nfs-storage-class
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 100Gi
---
# Source: outside-deploy/charts/all-persistence-volume-claims/templates/pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: glusterfs-middleware-srs-pvc
  namespace: xtgt
  labels:
    cmii.type: middleware-base
    cmii.app: glusterfs-middleware-srs-pvc
    helm.sh/chart: all-persistence-volume-claims-1.1.0
    app.kubernetes.io/version: 3.1.0
spec:
  storageClassName: nfs-storage-class
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 300Gi
---
# Source: outside-deploy/charts/all-persistence-volume-claims/templates/pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: helm-emqxs
  namespace: xtgt
  labels:
    cmii.type: middleware-base
    cmii.app: helm-emqxs
    helm.sh/chart: all-persistence-volume-claims-1.1.0
    app.kubernetes.io/version: 3.1.0
spec:
  storageClassName: nfs-storage-class
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 20Gi
---
# Source: outside-deploy/charts/all-persistence-volume-claims/templates/pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: helm-mongo
  namespace: xtgt
  labels:
    cmii.type: middleware-base
    cmii.app: helm-mongo
    helm.sh/chart: all-persistence-volume-claims-1.1.0
    app.kubernetes.io/version: 3.1.0
spec:
  storageClassName: nfs-storage-class
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 30Gi
---
# Source: outside-deploy/charts/all-persistence-volume-claims/templates/pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: helm-rabbitmq
  namespace: xtgt
  labels:
    cmii.type: middleware-base
    cmii.app: helm-rabbitmq
    helm.sh/chart: all-persistence-volume-claims-1.1.0
    app.kubernetes.io/version: 3.1.0
spec:
  storageClassName: nfs-storage-class
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 20Gi
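All five claims reference the nfs-storage-class StorageClass, so nothing binds until that class and its provisioner exist. A minimal check:

```bash
kubectl get storageclass nfs-storage-class
kubectl -n xtgt get pvc   # all five claims should reach STATUS Bound
```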
38 3-湘潭钢铁项目/2-helm-chart/1.yaml Normal file
@@ -0,0 +1,38 @@
upstream up_server {
    ip_hash;
    server 172.32.12.35:30500;
    server 172.32.12.36:30500;
    server 172.32.12.37:30500;
}

kind: Service
apiVersion: v1
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/version: 0.44.0
    helm.sh/chart: ingress-nginx-3.23.0
spec:
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: http
      nodePort: 30500
    - name: https
      protocol: TCP
      port: 443
      targetPort: https
      nodePort: 31500
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: NodePort
  sessionAffinity: None
  externalTrafficPolicy: Cluster
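The upstream block at the top of this file is an external nginx fragment pointing at the Service's HTTP NodePort defined below it; whether each worker actually answers on that port can be checked directly. A minimal sketch:

```bash
# Each upstream member should answer on the ingress controller's HTTP NodePort.
for ip in 172.32.12.35 172.32.12.36 172.32.12.37; do
  curl -sS -o /dev/null -w "${ip} -> %{http_code}\n" "http://${ip}:30500/"
done
```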
779 3-湘潭钢铁项目/2-helm-chart/2-ingress.yaml Normal file
@@ -0,0 +1,779 @@
|
||||
|
||||
---
|
||||
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-platform
|
||||
namespace: xtgt
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
// 浠嶤onfigMap涓敞鍏?
|
||||
// injected from ConfigMap
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "172.32.12.34",
|
||||
ApplicationShortName: "",
|
||||
}
|
||||
---
|
||||
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-ai-brain
|
||||
namespace: xtgt
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
// 浠嶤onfigMap涓敞鍏?
|
||||
// injected from ConfigMap
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "172.32.12.34",
|
||||
ApplicationShortName: "ai-brain",
|
||||
AppClientId: "APP_rafnuCAmBESIVYMH"
|
||||
}
|
||||
---
|
||||
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-base
|
||||
namespace: xtgt
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
// 浠嶤onfigMap涓敞鍏?
|
||||
// injected from ConfigMap
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "172.32.12.34",
|
||||
ApplicationShortName: "base",
|
||||
AppClientId: "APP_9LY41OaKSqk2btY0"
|
||||
}
|
||||
---
|
||||
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-cms
|
||||
namespace: xtgt
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
// 浠嶤onfigMap涓敞鍏?
|
||||
// injected from ConfigMap
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "172.32.12.34",
|
||||
ApplicationShortName: "cms",
|
||||
}
|
||||
---
|
||||
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-cmsportal
|
||||
namespace: xtgt
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
// 浠嶤onfigMap涓敞鍏?
|
||||
// injected from ConfigMap
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "172.32.12.34",
|
||||
ApplicationShortName: "cmsportal",
|
||||
}
|
||||
---
|
||||
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-detection
|
||||
namespace: xtgt
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
// 浠嶤onfigMap涓敞鍏?
|
||||
// injected from ConfigMap
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "172.32.12.34",
|
||||
ApplicationShortName: "detection",
|
||||
AppClientId: "APP_FDHW2VLVDWPnnOCy"
|
||||
}
|
||||
---
|
||||
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-emergency
|
||||
namespace: xtgt
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
// 浠嶤onfigMap涓敞鍏?
|
||||
// injected from ConfigMap
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "172.32.12.34",
|
||||
ApplicationShortName: "emergency",
|
||||
AppClientId: "APP_aGsTAY1uMZrpKdfk"
|
||||
}
|
||||
---
|
||||
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-hyper
|
||||
namespace: xtgt
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
// 浠嶤onfigMap涓敞鍏?
|
||||
// injected from ConfigMap
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "172.32.12.34",
|
||||
ApplicationShortName: "hyper",
|
||||
AppClientId: "APP_xbMkKdsbsbv8SH4w"
|
||||
}
|
||||
---
|
||||
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-logistics
|
||||
namespace: xtgt
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
// 浠嶤onfigMap涓敞鍏?
|
||||
// injected from ConfigMap
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "172.32.12.34",
|
||||
ApplicationShortName: "logistics",
|
||||
AppClientId: "APP_PvdfRRRBPL8xbIwl"
|
||||
}
|
||||
---
|
||||
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-mws
|
||||
namespace: xtgt
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
// 浠嶤onfigMap涓敞鍏?
|
||||
// injected from ConfigMap
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
      CloudHOST: "172.32.12.34",
      ApplicationShortName: "mws",
      AppClientId: "APP_uKniXPELlRERBBwK"
    }
---
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-mws-admin
  namespace: xtgt
data:
  ingress-config.js: |-
    // 从ConfigMap中注入
    // injected from ConfigMap
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "172.32.12.34",
      ApplicationShortName: "mws-admin",
    }
---
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-oms
  namespace: xtgt
data:
  ingress-config.js: |-
    // 从ConfigMap中注入
    // injected from ConfigMap
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "172.32.12.34",
      ApplicationShortName: "oms",
    }
---
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-open
  namespace: xtgt
data:
  ingress-config.js: |-
    // 从ConfigMap中注入
    // injected from ConfigMap
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "172.32.12.34",
      ApplicationShortName: "open",
    }
---
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-security
  namespace: xtgt
data:
  ingress-config.js: |-
    // 从ConfigMap中注入
    // injected from ConfigMap
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "172.32.12.34",
      ApplicationShortName: "security",
      AppClientId: "APP_JUSEMc7afyWXxvE7"
    }
---
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-share
  namespace: xtgt
data:
  ingress-config.js: |-
    // 从ConfigMap中注入
    // injected from ConfigMap
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "172.32.12.34",
      ApplicationShortName: "share",
      AppClientId: "APP_4lVSVI0ZGxTssir8"
    }
---
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-splice
  namespace: xtgt
data:
  ingress-config.js: |-
    // 从ConfigMap中注入
    // injected from ConfigMap
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "172.32.12.34",
      ApplicationShortName: "splice",
      AppClientId: "APP_zE0M3sTRXrCIJS8Y"
    }
---
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-splice-visual
  namespace: xtgt
data:
  ingress-config.js: |-
    // 从ConfigMap中注入
    // injected from ConfigMap
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "172.32.12.34",
      ApplicationShortName: "splice-visual",
    }
---
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-traffic
  namespace: xtgt
data:
  ingress-config.js: |-
    // 从ConfigMap中注入
    // injected from ConfigMap
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "172.32.12.34",
      ApplicationShortName: "traffic",
      AppClientId: "APP_PvdfRRRBPL8xbIwl"
    }
---
# Source: outside-deploy/charts/all-ingress-config/templates/configmap.yaml
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-visualization
  namespace: xtgt
data:
  ingress-config.js: |-
    // 从ConfigMap中注入
    // injected from ConfigMap
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "172.32.12.34",
      ApplicationShortName: "visualization",
      AppClientId: "APP_Jc8i2wOQ1t73QEJS"
    }
---
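A quick way to confirm what each tenant ConfigMap actually serves to the frontend is to print the rendered ingress-config.js back out of the cluster; a minimal sketch, assuming kubectl access (ConfigMap and namespace names are taken from the manifests above, and the dot in the key must be escaped in jsonpath):

# Print the injected JS snippet for one tenant application
kubectl -n xtgt get configmap tenant-prefix-oms -o jsonpath='{.data.ingress-config\.js}'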
# Source: outside-deploy/charts/all-ingress-config/templates/ingress-api-gateway.yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: all-gateways-ingress
  namespace: xtgt
  labels:
    type: api-gateway
    accessmode: public
    helm.sh/chart: all-ingress-config-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "3.1.0"
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/enable-cors: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /$1
    nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection "Upgrade";
spec:
  rules:
    - host: xtgt.fakedomain.io
      http:
        paths:
          - path: /oms/api/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-admin-gateway
              servicePort: 8080
          - path: /open/api/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-open-gateway
              servicePort: 8080
          - path: /api/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-gateway
              servicePort: 8080
---
# Source: outside-deploy/charts/all-ingress-config/templates/ingress-api-gateway.yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: all-apis-ingress
  namespace: xtgt
  labels:
    type: api-gateway
    accessmode: public
    helm.sh/chart: all-ingress-config-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "3.1.0"
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/enable-cors: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /$2
    nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection "Upgrade";
spec:
  rules:
    - host: xtgt.fakedomain.io
      http:
        paths:
          - path: /?(.*)/api/warehouses/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-material-warehouse
              servicePort: 8080
---
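With rewrite-target /$1, a request to /oms/api/<rest> reaches cmii-admin-gateway with only /<rest> as its path. A hedged smoke test, assuming the nginx ingress controller is reachable on some node IP (<ingress-node-ip> is a placeholder, and the health endpoint below is an assumption, not a confirmed route):

# The Host header selects the rule above; the gateway should see /actuator/health
curl -s -H 'Host: xtgt.fakedomain.io' "http://<ingress-node-ip>/oms/api/actuator/health"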
# Source: outside-deploy/charts/all-ingress-config/templates/ingress-backend.yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: backend-applications-ingress
  namespace: xtgt
  labels:
    type: backend
    accessmode: internal
    helm.sh/chart: all-ingress-config-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "3.1.0"
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
  rules:
    - host: cmii-admin-data.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-admin-data
              servicePort: 8080
    - host: cmii-admin-gateway.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-admin-gateway
              servicePort: 8080
    - host: cmii-admin-user.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-admin-user
              servicePort: 8080
    - host: cmii-open-gateway.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-open-gateway
              servicePort: 8080
    - host: cmii-project-minio.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-project-minio
              servicePort: 8080
    - host: cmii-uav-airspace.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-airspace
              servicePort: 8080
    - host: cmii-uav-brain.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-brain
              servicePort: 8080
    - host: cmii-uav-clusters.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-clusters
              servicePort: 8080
    - host: cmii-uav-cms.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-cms
              servicePort: 8080
    - host: cmii-uav-data-post-process.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-data-post-process
              servicePort: 8080
    - host: cmii-uav-developer.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-developer
              servicePort: 8080
    - host: cmii-uav-device.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-device
              servicePort: 8080
    - host: cmii-uav-gateway.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-gateway
              servicePort: 8080
    - host: cmii-uav-industrial-portfolio.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-industrial-portfolio
              servicePort: 8080
    - host: cmii-uav-kpi-monitor.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-kpi-monitor
              servicePort: 8080
    - host: cmii-uav-live.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-live
              servicePort: 8080
    - host: cmii-uav-logger.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-logger
              servicePort: 8080
    - host: cmii-uav-material-warehouse.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-material-warehouse
              servicePort: 8080
    - host: cmii-uav-mission.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-mission
              servicePort: 8080
    - host: cmii-uav-monitor.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-monitor
              servicePort: 8080
    - host: cmii-uav-mqtthandler.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-mqtthandler
              servicePort: 8080
    - host: cmii-uav-notice.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-notice
              servicePort: 8080
    - host: cmii-uav-oauth.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-oauth
              servicePort: 8080
    - host: cmii-uav-process.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-process
              servicePort: 8080
    - host: cmii-uav-security-system.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-security-system
              servicePort: 8080
    - host: cmii-uav-surveillance.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-surveillance
              servicePort: 8080
    - host: cmii-uav-user.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-user
              servicePort: 8080
    - host: cmii-uav-waypoint.uavcloud-outside.io
      http:
        paths:
          - path: /
            backend:
              serviceName: cmii-uav-waypoint
              servicePort: 8080
---
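Each backend service gets its own internal hostname; since the *.uavcloud-outside.io names are unlikely to resolve publicly, the simplest probe sets the Host header explicitly. A sketch, again with <ingress-node-ip> as a placeholder:

# Expect an HTTP status code from the gateway rather than the controller's default backend
curl -s -o /dev/null -w '%{http_code}\n' -H 'Host: cmii-uav-gateway.uavcloud-outside.io' "http://<ingress-node-ip>/"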
# Source: outside-deploy/charts/all-ingress-config/templates/ingress-frontend.yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: frontend-applications-ingress
  namespace: xtgt
  labels:
    type: frontend
    accessmode: public
    helm.sh/chart: all-ingress-config-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "3.1.0"
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/enable-cors: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /$1
    nginx.ingress.kubernetes.io/configuration-snippet: |
      rewrite ^(/green)$ $1/ redirect;
      rewrite ^(/supervision)$ $1/ redirect;
      rewrite ^(/inspection)$ $1/ redirect;
      rewrite ^(/pangu)$ $1/ redirect;
      rewrite ^(/ai-brain)$ $1/ redirect;
      rewrite ^(/base)$ $1/ redirect;
      rewrite ^(/cms)$ $1/ redirect;
      rewrite ^(/cmsportal)$ $1/ redirect;
      rewrite ^(/detection)$ $1/ redirect;
      rewrite ^(/emergency)$ $1/ redirect;
      rewrite ^(/hyper)$ $1/ redirect;
      rewrite ^(/logistics)$ $1/ redirect;
      rewrite ^(/mws)$ $1/ redirect;
      rewrite ^(/mws-admin)$ $1/ redirect;
      rewrite ^(/oms)$ $1/ redirect;
      rewrite ^(/open)$ $1/ redirect;
      rewrite ^(/security)$ $1/ redirect;
      rewrite ^(/share)$ $1/ redirect;
      rewrite ^(/splice)$ $1/ redirect;
      rewrite ^(/splice-visual)$ $1/ redirect;
      rewrite ^(/traffic)$ $1/ redirect;
      rewrite ^(/visualization)$ $1/ redirect;
spec:
  rules:
    - host: xtgt.fakedomain.io
      http:
        paths:
          - path: /inspection/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform
              servicePort: 9528
          - path: /supervision/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform
              servicePort: 9528
          - path: /green/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform
              servicePort: 9528
          - path: /pangu/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform
              servicePort: 9528
          - path: /?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform
              servicePort: 9528
          - path: /ai-brain/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-ai-brain
              servicePort: 9528
          - path: /base/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-base
              servicePort: 9528
          - path: /cms/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-cms
              servicePort: 9528
          - path: /cmsportal/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-cms-portal
              servicePort: 9528
          - path: /detection/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-detection
              servicePort: 9528
          - path: /emergency/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-emergency-rescue
              servicePort: 9528
          - path: /hyper/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-hyperspectral
              servicePort: 9528
          - path: /logistics/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-logistics
              servicePort: 9528
          - path: /mws/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-mws
              servicePort: 9528
          - path: /mws-admin/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-mws-admin
              servicePort: 9528
          - path: /oms/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-oms
              servicePort: 9528
          - path: /open/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-open
              servicePort: 9528
          - path: /security/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-security
              servicePort: 9528
          - path: /share/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-share
              servicePort: 9528
          - path: /splice/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-splice
              servicePort: 9528
          - path: /splice-visual/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-splice-visual
              servicePort: 9528
          - path: /traffic/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-traffic-screen
              servicePort: 9528
          - path: /visualization/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-visualization
              servicePort: 9528
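Note that the configuration-snippet above only adds the trailing-slash redirects; routing itself depends on how the controller orders these ImplementationSpecific paths, and the catch-all /?(.*) sits in the middle of the list, so it is worth verifying that /mws really lands on cmii-uav-platform-mws rather than the catch-all. A hedged check of the redirect half, with the same placeholder node IP:

# Expect a 30x response with Location: /oms/ from the rewrite rule above
curl -sI -H 'Host: xtgt.fakedomain.io' "http://<ingress-node-ip>/oms" | head -n 5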
17
3-湘潭钢铁项目/2-helm-chart/2.yaml
Normal file
@@ -0,0 +1,17 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-logistic-application
  namespace: uavcloud-devflight
  labels:
    app.kubernetes.io/managed-by: Helm
data:
  ingress-config.js: |-
    // 从ConfigMap中注入
    // injected from ConfigMap
    var __GlobalIngressConfig = {
      TenantEnvironment: "devflight",
      CloudHOST: "lab.uavcmlc.com",
      ApplicationShortName: "logistic",
      AppClientId: "APP_PvdfRRRBPL8xbIwl"
    }
389
3-湘潭钢铁项目/2-helm-chart/3-0-mysql.yaml
Normal file
@@ -0,0 +1,389 @@
---
# Source: outside-deploy/charts/mysql-db/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: helm-mysql
  namespace: xtgt
  labels:
    app.kubernetes.io/name: mysql-db
    helm.sh/chart: mysql-db-8.8.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: mysql-db
  annotations:
secrets:
  - name: helm-mysql
---
# Source: outside-deploy/charts/mysql-db/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
  name: helm-mysql
  namespace: xtgt
  labels:
    app.kubernetes.io/name: mysql-db
    helm.sh/chart: mysql-db-8.8.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: mysql-db
type: Opaque
data:
  mysql-root-password: "UXpmWFFoZDNiUQ=="
  mysql-password: "S0F0cm5PckFKNw=="
---
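The passwords land in the cluster base64-encoded; to read one back, a minimal sketch assuming kubectl access:

# Decode the root password stored in the Secret above
kubectl -n xtgt get secret helm-mysql -o jsonpath='{.data.mysql-root-password}' | base64 -d; echo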
# Source: outside-deploy/charts/mysql-db/templates/primary/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: helm-mysql
  namespace: xtgt
  labels:
    app.kubernetes.io/name: mysql-db
    helm.sh/chart: mysql-db-8.8.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: mysql-db
    app.kubernetes.io/component: primary
data:
  my.cnf: |-
    [mysqld]
    port=3306
    basedir=/opt/bitnami/mysql
    datadir=/bitnami/mysql/data
    pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
    socket=/opt/bitnami/mysql/tmp/mysql.sock
    log-error=/bitnami/mysql/data/error.log
    general_log_file = /bitnami/mysql/data/general.log
    slow_query_log_file = /bitnami/mysql/data/slow.log
    innodb_data_file_path = ibdata1:512M:autoextend
    innodb_buffer_pool_size = 512M
    innodb_buffer_pool_instances = 2
    innodb_log_file_size = 512M
    innodb_log_files_in_group = 4
    log-bin = /bitnami/mysql/data/mysql-bin
    max_binlog_size=1G
    transaction_isolation = REPEATABLE-READ
    default_storage_engine = innodb
    character-set-server = utf8mb4
    collation-server=utf8mb4_bin
    binlog_format = ROW
    binlog_rows_query_log_events=on
    binlog_cache_size=4M
    binlog_expire_logs_seconds = 1296000
    max_binlog_cache_size=2G
    gtid_mode = on
    enforce_gtid_consistency = 1
    sync_binlog = 1
    innodb_flush_log_at_trx_commit = 1
    innodb_flush_method = O_DIRECT
    log_slave_updates=1
    relay_log_recovery = 1
    relay-log-purge = 1
    default_time_zone = '+08:00'
    lower_case_table_names=1
    log_bin_trust_function_creators=1
    group_concat_max_len=67108864
    innodb_io_capacity = 4000
    innodb_io_capacity_max = 8000
    innodb_flush_sync = 0
    innodb_flush_neighbors = 0
    innodb_write_io_threads = 8
    innodb_read_io_threads = 8
    innodb_purge_threads = 4
    innodb_page_cleaners = 4
    innodb_open_files = 65535
    innodb_max_dirty_pages_pct = 50
    innodb_lru_scan_depth = 4000
    innodb_checksum_algorithm = crc32
    innodb_lock_wait_timeout = 10
    innodb_rollback_on_timeout = 1
    innodb_print_all_deadlocks = 1
    innodb_file_per_table = 1
    innodb_online_alter_log_max_size = 4G
    innodb_stats_on_metadata = 0
    innodb_thread_concurrency = 0
    innodb_sync_spin_loops = 100
    innodb_spin_wait_delay = 30
    lock_wait_timeout = 3600
    slow_query_log = 1
    long_query_time = 10
    log_queries_not_using_indexes = 1
    log_throttle_queries_not_using_indexes = 60
    min_examined_row_limit = 100
    log_slow_admin_statements = 1
    log_slow_slave_statements = 1
    default_authentication_plugin=mysql_native_password
    skip-name-resolve=1
    explicit_defaults_for_timestamp=1
    plugin_dir=/opt/bitnami/mysql/plugin
    max_allowed_packet=128M
    max_connections = 2000
    max_connect_errors = 1000000
    table_definition_cache=2000
    table_open_cache_instances=64
    tablespace_definition_cache=1024
    thread_cache_size=256
    interactive_timeout = 600
    wait_timeout = 600
    tmpdir=/opt/bitnami/mysql/tmp
    bind-address=0.0.0.0
    performance_schema = 1
    performance_schema_instrument = '%memory%=on'
    performance_schema_instrument = '%lock%=on'
    innodb_monitor_enable=ALL

    [mysql]
    no-auto-rehash

    [mysqldump]
    quick
    max_allowed_packet = 32M

    [client]
    port=3306
    socket=/opt/bitnami/mysql/tmp/mysql.sock
    default-character-set=UTF8
    plugin_dir=/opt/bitnami/mysql/plugin

    [manager]
    port=3306
    socket=/opt/bitnami/mysql/tmp/mysql.sock
    pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
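When a my.cnf key is set more than once (the original file repeated innodb_log_files_in_group and max_allowed_packet in [mysqld]; the later value wins and the duplicates are dropped above), the reliable check is to ask the running server rather than read the file. A sketch, assuming the StatefulSet's first pod is helm-mysql-0:

# Show the value the server actually applied
kubectl -n xtgt exec helm-mysql-0 -- bash -c 'mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -e "SHOW VARIABLES LIKE \"max_allowed_packet\";"'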
# Source: outside-deploy/charts/mysql-db/templates/primary/initialization-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: helm-mysql-init-scripts
  namespace: xtgt
  labels:
    app.kubernetes.io/name: mysql-db
    helm.sh/chart: mysql-db-8.8.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: mysql-db
    app.kubernetes.io/component: primary
data:
  create_users_grants_core.sql: |-
    create user zyly@'%' identified by 'Cmii@451315';
    grant select on *.* to zyly@'%';
    create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
    grant all on *.* to zyly_qc@'%';
    create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
    grant all on *.* to k8s_admin@'%';
    create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
    grant all on *.* to audit_dba@'%';
    create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
    GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
    create user monitor@'%' identified by 'PL3#nGtrWbf-';
    grant REPLICATION CLIENT on *.* to monitor@'%';
    flush privileges;
---
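Scripts mounted under /docker-entrypoint-initdb.d run only when the data directory is initialized for the first time; on a reused volume they are silently skipped. A sketch for re-applying the grants by hand, assuming the SQL above has been saved locally as create_users_grants_core.sql:

# Pipe the init SQL into the running server
kubectl -n xtgt exec -i helm-mysql-0 -- bash -c 'mysql -uroot -p"$MYSQL_ROOT_PASSWORD"' < create_users_grants_core.sql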
# Source: outside-deploy/charts/mysql-db/templates/primary/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
  name: helm-mysql-headless
  namespace: xtgt
  labels:
    app.kubernetes.io/name: mysql-db
    helm.sh/chart: mysql-db-8.8.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: mysql-db
    cmii.type: middleware
    cmii.app: mysql
    app.kubernetes.io/component: primary
  annotations:
spec:
  type: ClusterIP
  clusterIP: None
  publishNotReadyAddresses: true
  ports:
    - name: mysql
      port: 3306
      targetPort: mysql
  selector:
    app.kubernetes.io/name: mysql-db
    app.kubernetes.io/release: xtgt
    cmii.type: middleware
    cmii.app: mysql
    app.kubernetes.io/component: primary
---
# Source: outside-deploy/charts/mysql-db/templates/primary/svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: helm-mysql
  namespace: xtgt
  labels:
    app.kubernetes.io/name: mysql-db
    helm.sh/chart: mysql-db-8.8.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: mysql-db
    cmii.type: middleware
    cmii.app: mysql
    app.kubernetes.io/component: primary
  annotations:
spec:
  type: ClusterIP
  ports:
    - name: mysql
      port: 3306
      protocol: TCP
      targetPort: mysql
      nodePort: null
  selector:
    app.kubernetes.io/name: mysql-db
    app.kubernetes.io/release: xtgt
    cmii.type: middleware
    cmii.app: mysql
    app.kubernetes.io/component: primary
---
# Source: outside-deploy/charts/mysql-db/templates/primary/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: helm-mysql
  namespace: xtgt
  labels:
    app.kubernetes.io/name: mysql-db
    helm.sh/chart: mysql-db-8.8.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: mysql-db
    cmii.type: middleware
    cmii.app: mysql
    app.kubernetes.io/component: primary
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: mysql-db
      app.kubernetes.io/release: xtgt
      cmii.type: middleware
      cmii.app: mysql
      app.kubernetes.io/component: primary
  serviceName: helm-mysql
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      annotations:
        checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
      labels:
        app.kubernetes.io/name: mysql-db
        helm.sh/chart: mysql-db-8.8.1
        app.kubernetes.io/release: xtgt
        app.kubernetes.io/managed-by: mysql-db
        cmii.type: middleware
        cmii.app: mysql
        app.kubernetes.io/component: primary
    spec:
      serviceAccountName: helm-mysql
      affinity: {}
      nodeSelector:
        mysql-deploy: "true"
      securityContext:
        fsGroup: 1001
      initContainers:
        - name: change-volume-permissions
          image: "172.32.12.34:8033/cmii/bitnami-shell:10-debian-10-r140"
          imagePullPolicy: "Always"
          command:
            - /bin/bash
            - -ec
            - |
              chown -R 1001:1001 /bitnami/mysql
          securityContext:
            runAsUser: 0
          volumeMounts:
            - name: mysql-data
              mountPath: /bitnami/mysql
      containers:
        - name: mysql
          image: "172.32.12.34:8033/cmii/mysql:8.0.26-debian-10-r0"
          imagePullPolicy: "IfNotPresent"
          securityContext:
            runAsUser: 1001
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: helm-mysql
                  key: mysql-root-password
            - name: MYSQL_DATABASE
              value: "cmii"
          ports:
            - name: mysql
              containerPort: 3306
          livenessProbe:
            failureThreshold: 5
            initialDelaySeconds: 120
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 3
            exec:
              command:
                - /bin/bash
                - -ec
                - |
                  password_aux="${MYSQL_ROOT_PASSWORD:-}"
                  if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
                      password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
                  fi
                  mysqladmin status -uroot -p"${password_aux}"
          readinessProbe:
            failureThreshold: 5
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 3
            exec:
              command:
                - /bin/bash
                - -ec
                - |
                  password_aux="${MYSQL_ROOT_PASSWORD:-}"
                  if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
                      password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
                  fi
                  mysqladmin status -uroot -p"${password_aux}"
          startupProbe:
            failureThreshold: 60
            initialDelaySeconds: 120
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
            exec:
              command:
                - /bin/bash
                - -ec
                - |
                  password_aux="${MYSQL_ROOT_PASSWORD:-}"
                  if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
                      password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
                  fi
                  mysqladmin status -uroot -p"${password_aux}"
          resources:
            limits: {}
            requests: {}
          volumeMounts:
            - name: mysql-data
              mountPath: /bitnami/mysql
            - name: custom-init-scripts
              mountPath: /docker-entrypoint-initdb.d
            - name: config
              mountPath: /opt/bitnami/mysql/conf/my.cnf
              subPath: my.cnf
      volumes:
        - name: config
          configMap:
            name: helm-mysql
        - name: custom-init-scripts
          configMap:
            name: helm-mysql-init-scripts
        - name: mysql-data
          hostPath:
            path: /var/lib/docker/mysql-pv
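Because the data volume is a hostPath, the pod must keep landing on the same machine; the nodeSelector above does that, but only if exactly one node carries the label. A sketch (the node name is a placeholder):

# Label the node that should own the MySQL hostPath data, then confirm it is unique
kubectl label node <node-name> mysql-deploy=true
kubectl get nodes -l mysql-deploy=true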
856
3-湘潭钢铁项目/2-helm-chart/3-1-mid.yaml
Normal file
@@ -0,0 +1,856 @@
---
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/rbac-cluster.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: helm-emqxs
  namespace: xtgt
---
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: helm-rabbitmq
  namespace: xtgt
  labels:
    app.kubernetes.io/name: helm-rabbitmq
    helm.sh/chart: rabbitmq-8.26.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
  - name: helm-rabbitmq
---
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
  name: helm-rabbitmq
  namespace: xtgt
  labels:
    app.kubernetes.io/name: helm-rabbitmq
    helm.sh/chart: rabbitmq-8.26.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
  rabbitmq-password: "blljUk45MXIuX2hq"
  rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/configmap-cluster.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: helm-emqxs-env
  namespace: xtgt
  labels:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
    helm.sh/chart: emqx-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "3.1.0"
data:
  EMQX_CLUSTER__K8S__ADDRESS_TYPE: hostname
  EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc:443
  EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local
  EMQX_NAME: helm-emqxs
  EMQX_CLUSTER__K8S__APP_NAME: helm-emqxs
  EMQX_CLUSTER__DISCOVERY: k8s
  EMQX_CLUSTER__K8S__SERVICE_NAME: helm-emqxs-headless
  EMQX_CLUSTER__K8S__namespace: xtgt
  EMQX_ALLOW_ANONYMOUS: "false"
  EMQX_ACL_NOMATCH: "deny"
---
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/configmap-cluster.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: helm-emqxs-cm
  namespace: xtgt
  labels:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
    helm.sh/chart: emqx-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "3.1.0"
data:
  emqx_auth_username.conf: |-
    auth.user.1.username = cmlc
    auth.user.1.password = odD8#Ve7.B
    auth.user.password_hash = sha256

  acl.conf: |-
    {allow, {user, "admin"}, pubsub, ["admin/#"]}.
    {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
    {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
    {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
    {allow, all}.

  loaded_plugins: |-
    {emqx_auth_username,true}.
    {emqx_management, true}.
    {emqx_recon, true}.
    {emqx_retainer, false}.
    {emqx_dashboard, true}.
    {emqx_telemetry, true}.
    {emqx_rule_engine, true}.
    {emqx_bridge_mqtt, false}.
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: helm-nacos-cm
|
||||
namespace: xtgt
|
||||
labels:
|
||||
cmii.app: helm-nacos
|
||||
cmii.type: middleware
|
||||
helm.sh/chart: nacos-1.1.1
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/version: "3.1.0"
|
||||
data:
|
||||
mysql.db.name: "nacos_config"
|
||||
mysql.db.host: "helm-mysql"
|
||||
mysql.port: "3306"
|
||||
mysql.user: "k8s_admin"
|
||||
mysql.password: "EWde2cKP9w.G"
|
||||
---
|
||||
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/configuration.yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: helm-rabbitmq-config
|
||||
namespace: xtgt
|
||||
labels:
|
||||
app.kubernetes.io/name: helm-rabbitmq
|
||||
helm.sh/chart: rabbitmq-8.26.1
|
||||
app.kubernetes.io/release: xtgt
|
||||
app.kubernetes.io/managed-by: rabbitmq
|
||||
data:
|
||||
rabbitmq.conf: |-
|
||||
## Username and password
|
||||
##
|
||||
default_user = admin
|
||||
default_pass = nYcRN91r._hj
|
||||
## Clustering
|
||||
##
|
||||
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
|
||||
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
|
||||
cluster_formation.node_cleanup.interval = 10
|
||||
cluster_formation.node_cleanup.only_log_warning = true
|
||||
cluster_partition_handling = autoheal
|
||||
# queue master locator
|
||||
queue_master_locator = min-masters
|
||||
# enable guest user
|
||||
loopback_users.guest = false
|
||||
#default_vhost = default-vhost
|
||||
#disk_free_limit.absolute = 50MB
|
||||
#load_definitions = /app/load_definition.json
|
||||
---
|
||||
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/rbac-cluster.yaml
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: helm-emqxs
|
||||
namespace: xtgt
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- endpoints
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
---
|
||||
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/role.yaml
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: helm-rabbitmq-endpoint-reader
|
||||
namespace: xtgt
|
||||
labels:
|
||||
app.kubernetes.io/name: helm-rabbitmq
|
||||
helm.sh/chart: rabbitmq-8.26.1
|
||||
app.kubernetes.io/release: xtgt
|
||||
app.kubernetes.io/managed-by: rabbitmq
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["endpoints"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["create"]
|
||||
---
|
||||
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/rbac-cluster.yaml
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: helm-emqxs
|
||||
namespace: xtgt
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: helm-emqxs
|
||||
namespace: xtgt
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: helm-emqxs
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/rolebinding.yaml
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: helm-rabbitmq-endpoint-reader
|
||||
namespace: xtgt
|
||||
labels:
|
||||
app.kubernetes.io/name: helm-rabbitmq
|
||||
helm.sh/chart: rabbitmq-8.26.1
|
||||
app.kubernetes.io/release: xtgt
|
||||
app.kubernetes.io/managed-by: rabbitmq
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: helm-rabbitmq
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: helm-rabbitmq-endpoint-reader
|
||||
---
|
||||
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/svc-cluster.yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: helm-emqxs
|
||||
namespace: xtgt
|
||||
labels:
|
||||
cmii.type: middleware
|
||||
cmii.app: helm-emqxs
|
||||
cmii.emqx.architecture: cluster
|
||||
helm.sh/chart: emqx-1.1.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/version: "3.1.0"
|
||||
spec:
|
||||
type: NodePort
|
||||
selector:
|
||||
cmii.type: middleware
|
||||
cmii.app: helm-emqxs
|
||||
cmii.emqx.architecture: cluster
|
||||
ports:
|
||||
- port: 1883
|
||||
name: mqtt
|
||||
targetPort: 1883
|
||||
nodePort: 31883
|
||||
- port: 18083
|
||||
name: dashboard
|
||||
targetPort: 18083
|
||||
nodePort: 38085
|
||||
- port: 8083
|
||||
name: mqtt-websocket
|
||||
targetPort: 8083
|
||||
nodePort: 38083
|
||||
---
|
||||
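With anonymous access disabled and acl_nomatch set to deny, the NodePort only accepts the user defined in emqx_auth_username.conf. A hedged connectivity test using the mosquitto clients, assuming they are installed locally and <node-ip> is any cluster node:

# Publish once against the 31883 NodePort with the credentials from the ConfigMap above
mosquitto_pub -h <node-ip> -p 31883 -u cmlc -P 'odD8#Ve7.B' -t smoke/test -m hello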
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
  name: helm-emqxs-headless
  namespace: xtgt
  labels:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
    helm.sh/chart: emqx-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "3.1.0"
spec:
  type: ClusterIP
  clusterIP: None
  selector:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
  ports:
    - name: mqtt
      port: 1883
      protocol: TCP
      targetPort: 1883
    - name: mqttssl
      port: 8883
      protocol: TCP
      targetPort: 8883
    - name: mgmt
      port: 8081
      protocol: TCP
      targetPort: 8081
    - name: websocket
      port: 8083
      protocol: TCP
      targetPort: 8083
    - name: wss
      port: 8084
      protocol: TCP
      targetPort: 8084
    - name: dashboard
      port: 18083
      protocol: TCP
      targetPort: 18083
    - name: ekka
      port: 4370
      protocol: TCP
      targetPort: 4370
---
# Source: outside-deploy/charts/all-middleware/charts/mongo/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: helm-mongo
  namespace: xtgt
  labels:
    cmii.app: helm-mongo
    cmii.type: middleware
    helm.sh/chart: mongo-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "3.1.0"
spec:
  type: ClusterIP
  selector:
    cmii.app: helm-mongo
    cmii.type: middleware
  ports:
    - port: 27017
      name: server-27017
      targetPort: 27017
---
# Source: outside-deploy/charts/all-middleware/charts/nacos/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: helm-nacos
  namespace: xtgt
  labels:
    cmii.app: helm-nacos
    cmii.type: middleware
    helm.sh/chart: nacos-1.1.1
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "3.1.0"
spec:
  type: ClusterIP
  selector:
    cmii.app: helm-nacos
    cmii.type: middleware
  ports:
    - port: 8848
      name: server
      targetPort: 8848
---
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
  name: helm-rabbitmq-headless
  namespace: xtgt
  labels:
    app.kubernetes.io/name: helm-rabbitmq
    helm.sh/chart: rabbitmq-8.26.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: rabbitmq
spec:
  clusterIP: None
  ports:
    - name: epmd
      port: 4369
      targetPort: epmd
    - name: amqp
      port: 5672
      targetPort: amqp
    - name: dist
      port: 25672
      targetPort: dist
    - name: dashboard
      port: 15672
      targetPort: stats
  selector:
    app.kubernetes.io/name: helm-rabbitmq
    app.kubernetes.io/release: xtgt
  publishNotReadyAddresses: true
---
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: helm-rabbitmq
  namespace: xtgt
  labels:
    app.kubernetes.io/name: helm-rabbitmq
    helm.sh/chart: rabbitmq-8.26.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: rabbitmq
spec:
  type: ClusterIP
  ports:
    - name: amqp
      port: 5672
      targetPort: amqp
    - name: dashboard
      port: 15672
      targetPort: dashboard
  selector:
    app.kubernetes.io/name: helm-rabbitmq
    app.kubernetes.io/release: xtgt
---
# Source: outside-deploy/charts/all-middleware/charts/emqx/templates/cluster/statefulset-cluster.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: helm-emqxs
  namespace: xtgt
  labels:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
    helm.sh/chart: emqx-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "3.1.0"
spec:
  replicas: 3
  serviceName: helm-emqxs-headless
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      cmii.type: middleware
      cmii.app: helm-emqxs
      cmii.emqx.architecture: cluster
  template:
    metadata:
      labels:
        cmii.type: middleware
        cmii.app: helm-emqxs
        cmii.emqx.architecture: cluster
        helm.sh/chart: emqx-1.1.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/version: "3.1.0"
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: uavcloud.env
                    operator: In
                    values:
                      - "demo"
      serviceAccountName: helm-emqxs
      containers:
        - name: helm-emqxs
          image: "172.32.12.34:8033/cmii/emqx:4.2.12"
          imagePullPolicy: Always
          ports:
            - name: mqtt
              containerPort: 1883
            - name: mqttssl
              containerPort: 8883
            - name: mgmt
              containerPort: 8081
            - name: ws
              containerPort: 8083
            - name: wss
              containerPort: 8084
            - name: dashboard
              containerPort: 18083
            - name: ekka
              containerPort: 4370
          envFrom:
            - configMapRef:
                name: helm-emqxs-env
          resources:
            limits:
              cpu: "1"
              memory: 2Gi
            requests:
              cpu: 300m
              memory: 1Gi
          readinessProbe:
            httpGet:
              path: /status
              port: 8081
            initialDelaySeconds: 5
            periodSeconds: 5
          volumeMounts:
            - name: emqx-data
              mountPath: "/opt/emqx/data/mnesia"
              readOnly: false
            - name: helm-emqxs-cm
              mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf"
              subPath: emqx_auth_username.conf
              readOnly: false
            - name: helm-emqxs-cm
              mountPath: "/opt/emqx/etc/acl.conf"
              subPath: "acl.conf"
              readOnly: false
            - name: helm-emqxs-cm
              mountPath: "/opt/emqx/data/loaded_plugins"
              subPath: loaded_plugins
              readOnly: false
      volumes:
        - name: emqx-data
          persistentVolumeClaim:
            claimName: helm-emqxs
        - name: helm-emqxs-cm
          configMap:
            name: helm-emqxs-cm
            items:
              - key: emqx_auth_username.conf
                path: emqx_auth_username.conf
              - key: acl.conf
                path: acl.conf
              - key: loaded_plugins
                path: loaded_plugins
---
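Once all three replicas are up, the k8s discovery settings above should have joined them into a single cluster; a quick check from inside one broker pod (the pod name follows the StatefulSet naming convention):

# List cluster members as EMQX sees them
kubectl -n xtgt exec helm-emqxs-0 -- emqx_ctl cluster status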
# Source: outside-deploy/charts/all-middleware/charts/mongo/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: helm-mongo
  namespace: xtgt
  labels:
    cmii.app: helm-mongo
    cmii.type: middleware
    helm.sh/chart: mongo-1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "3.1.0"
spec:
  serviceName: helm-mongo
  replicas: 1
  selector:
    matchLabels:
      cmii.app: helm-mongo
      cmii.type: middleware
  template:
    metadata:
      labels:
        cmii.app: helm-mongo
        cmii.type: middleware
        helm.sh/chart: mongo-1.1.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/version: "3.1.0"
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: uavcloud.env
                    operator: In
                    values:
                      - "demo"
      containers:
        - name: helm-mongo
          image: "172.32.12.34:8033/cmii/mongo:5.0"
          resources:
            limits:
              cpu: "2"
              memory: 4Gi
            requests:
              cpu: "1"
              memory: 2Gi
          ports:
            - containerPort: 27017
              name: mongo27017
              protocol: TCP
          env:
            - name: MONGO_INITDB_ROOT_USERNAME
              value: cmlc
            - name: MONGO_INITDB_ROOT_PASSWORD
              value: REdPza8#oVlt
          volumeMounts:
            - name: mongo-data
              mountPath: /data/db
              readOnly: false
              subPath: default/helm-mongo/data/db
      volumes:
        - name: mongo-data
          persistentVolumeClaim:
            claimName: helm-mongo
---
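A minimal liveness check against the single mongo replica, reusing the root credentials injected above (mongosh ships with the mongo:5.0 image; if only the legacy shell is present, substitute mongo):

# Ping the server as the root user
kubectl -n xtgt exec helm-mongo-0 -- mongosh -u cmlc -p 'REdPza8#oVlt' --eval 'db.runCommand({ping: 1})'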
# Source: outside-deploy/charts/all-middleware/charts/nacos/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: helm-nacos
  namespace: xtgt
  labels:
    cmii.app: helm-nacos
    cmii.type: middleware
    helm.sh/chart: nacos-1.1.1
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "3.1.0"
spec:
  serviceName: helm-nacos
  replicas: 1
  selector:
    matchLabels:
      cmii.app: helm-nacos
      cmii.type: middleware
  template:
    metadata:
      labels:
        cmii.app: helm-nacos
        cmii.type: middleware
        helm.sh/chart: nacos-1.1.1
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/version: "3.1.0"
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: uavcloud.env
                    operator: In
                    values:
                      - "demo"
      containers:
        - name: nacos-server
          image: "172.32.12.34:8033/cmii/nacos-server:2.0.1"
          ports:
            - containerPort: 8848
              name: dashboard
          env:
            - name: NACOS_AUTH_ENABLE
              value: "true"
            - name: NACOS_REPLICAS
              value: "1"
            - name: MYSQL_SERVICE_DB_NAME
              valueFrom:
                configMapKeyRef:
                  name: helm-nacos-cm
                  key: mysql.db.name
            - name: MYSQL_SERVICE_PORT
              valueFrom:
                configMapKeyRef:
                  name: helm-nacos-cm
                  key: mysql.port
            - name: MYSQL_SERVICE_USER
              valueFrom:
                configMapKeyRef:
                  name: helm-nacos-cm
                  key: mysql.user
            - name: MYSQL_SERVICE_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: helm-nacos-cm
                  key: mysql.password
            - name: MYSQL_SERVICE_HOST
              valueFrom:
                configMapKeyRef:
                  name: helm-nacos-cm
                  key: mysql.db.host
            - name: NACOS_SERVER_PORT
              value: "8848"
            - name: NACOS_APPLICATION_PORT
              value: "8848"
            - name: PREFER_HOST_MODE
              value: "hostname"
            - name: MODE
              value: standalone
            - name: SPRING_DATASOURCE_PLATFORM
              value: mysql
          resources:
            limits:
              cpu: "1"
              memory: 2Gi
            requests:
              cpu: 500m
              memory: 1Gi
---
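Nacos runs standalone with auth enabled, so the console requires a login; a sketch that tunnels the dashboard locally (the Nacos account itself is not defined in these manifests, so the credentials below are placeholders to fill in):

# Forward the dashboard and exercise the login API
kubectl -n xtgt port-forward svc/helm-nacos 8848:8848 &
curl -s -X POST 'http://127.0.0.1:8848/nacos/v1/auth/login' -d 'username=<nacos-user>&password=<nacos-password>'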
# Source: outside-deploy/charts/all-middleware/charts/rabbitmq/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: helm-rabbitmq
  namespace: xtgt
  labels:
    app.kubernetes.io/name: helm-rabbitmq
    helm.sh/chart: rabbitmq-8.26.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: rabbitmq
spec:
  serviceName: helm-rabbitmq-headless
  podManagementPolicy: OrderedReady
  replicas: 1
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/name: helm-rabbitmq
      app.kubernetes.io/release: xtgt
  template:
    metadata:
      labels:
        app.kubernetes.io/name: helm-rabbitmq
        helm.sh/chart: rabbitmq-8.26.1
        app.kubernetes.io/release: xtgt
        app.kubernetes.io/managed-by: rabbitmq
      annotations:
        checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
        checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
    spec:
      serviceAccountName: helm-rabbitmq
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: uavcloud.env
                    operator: In
                    values:
                      - "demo"
      securityContext:
        fsGroup: 5001
        runAsUser: 5001
      terminationGracePeriodSeconds: 120
      initContainers:
        - name: volume-permissions
          image: "172.32.12.34:8033/cmii/bitnami-shell:10-debian-10-r140"
          imagePullPolicy: "Always"
          command:
            - /bin/bash
          args:
            - -ec
            - |
              mkdir -p "/bitnami/rabbitmq/mnesia"
              chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
          securityContext:
            runAsUser: 0
          resources:
            limits: {}
            requests: {}
          volumeMounts:
            - name: data
              mountPath: /bitnami/rabbitmq/mnesia
      containers:
        - name: rabbitmq
          image: "172.32.12.34:8033/cmii/rabbitmq:3.9.12-debian-10-r3"
          imagePullPolicy: "Always"
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: K8S_SERVICE_NAME
              value: "helm-rabbitmq-headless"
            - name: K8S_ADDRESS_TYPE
              value: hostname
            - name: RABBITMQ_FORCE_BOOT
              value: "no"
            - name: RABBITMQ_NODE_NAME
              value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
            - name: K8S_HOSTNAME_SUFFIX
              value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
            - name: RABBITMQ_MNESIA_DIR
              value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
            - name: RABBITMQ_LDAP_ENABLE
              value: "no"
            - name: RABBITMQ_LOGS
              value: "-"
            - name: RABBITMQ_ULIMIT_NOFILES
              value: "65536"
            - name: RABBITMQ_USE_LONGNAME
              value: "true"
            - name: RABBITMQ_ERL_COOKIE
              valueFrom:
                secretKeyRef:
                  name: helm-rabbitmq
                  key: rabbitmq-erlang-cookie
            - name: RABBITMQ_LOAD_DEFINITIONS
              value: "no"
            - name: RABBITMQ_SECURE_PASSWORD
              value: "yes"
            - name: RABBITMQ_USERNAME
              value: "admin"
            - name: RABBITMQ_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: helm-rabbitmq
                  key: rabbitmq-password
            - name: RABBITMQ_PLUGINS
              value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
          ports:
            - name: amqp
              containerPort: 5672
            - name: dist
              containerPort: 25672
            - name: dashboard
              containerPort: 15672
            - name: epmd
              containerPort: 4369
          livenessProbe:
            exec:
              command:
                - /bin/bash
                - -ec
                - rabbitmq-diagnostics -q ping
            initialDelaySeconds: 120
            periodSeconds: 30
            timeoutSeconds: 20
            successThreshold: 1
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
                - /bin/bash
                - -ec
                - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
            initialDelaySeconds: 10
            periodSeconds: 30
            timeoutSeconds: 20
            successThreshold: 1
            failureThreshold: 3
          lifecycle:
            preStop:
              exec:
                command:
                  - /bin/bash
                  - -ec
                  - |
                    if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
                        /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
                    else
                        rabbitmqctl stop_app
                    fi
          resources:
            limits: {}
            requests: {}
          volumeMounts:
            - name: configuration
              mountPath: /bitnami/rabbitmq/conf
            - name: data
              mountPath: /bitnami/rabbitmq/mnesia
      volumes:
        - name: configuration
          configMap:
            name: helm-rabbitmq-config
            items:
              - key: rabbitmq.conf
                path: rabbitmq.conf
        - name: data
          persistentVolumeClaim:
            claimName: helm-rabbitmq
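The liveness and readiness probes above simply wrap rabbitmq-diagnostics; the same commands work interactively when troubleshooting the broker:

# Run the probe checks by hand against the single replica
kubectl -n xtgt exec helm-rabbitmq-0 -- rabbitmq-diagnostics -q check_running
kubectl -n xtgt exec helm-rabbitmq-0 -- rabbitmq-diagnostics -q check_local_alarms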
1215
3-湘潭钢铁项目/2-helm-chart/3-mid.yaml
Normal file
File diff suppressed because it is too large
598
3-湘潭钢铁项目/2-helm-chart/4-redi.yaml
Normal file
@@ -0,0 +1,598 @@
|
||||
---
|
||||
# Source: outside-deploy/charts/redis-db/templates/serviceaccount.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
automountServiceAccountToken: true
|
||||
metadata:
|
||||
name: helm-redis
|
||||
namespace: xtgt
|
||||
labels:
|
||||
app.kubernetes.io/name: redis-db
|
||||
helm.sh/chart: redis-db-15.4.1
|
||||
app.kubernetes.io/release: xtgt
|
||||
app.kubernetes.io/managed-by: redis-db
|
||||
---
|
||||
# Source: outside-deploy/charts/redis-db/templates/secret.yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: helm-redis
|
||||
namespace: xtgt
|
||||
labels:
|
||||
app.kubernetes.io/name: redis-db
|
||||
helm.sh/chart: redis-db-15.4.1
|
||||
app.kubernetes.io/release: xtgt
|
||||
app.kubernetes.io/managed-by: redis-db
|
||||
type: Opaque
|
||||
data:
|
||||
redis-password: "TWNhY2hlQDQ1MjI="
|
||||
---
|
||||
# Source: outside-deploy/charts/redis-db/templates/configmap.yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: helm-redis-configuration
|
||||
namespace: xtgt
|
||||
labels:
|
||||
app.kubernetes.io/name: redis-db
|
||||
helm.sh/chart: redis-db-15.4.1
|
||||
app.kubernetes.io/release: xtgt
|
||||
app.kubernetes.io/managed-by: redis-db
|
||||
data:
|
||||
redis.conf: |-
    # User-supplied common configuration:
    # Enable AOF https://redis.io/topics/persistence#append-only-file
    appendonly yes
    # Disable RDB persistence, AOF persistence already enabled.
    save ""
    # End of common configuration
  master.conf: |-
    dir /data
    # User-supplied master configuration:
    rename-command FLUSHDB ""
    rename-command FLUSHALL ""
    # End of master configuration
  replica.conf: |-
    dir /data
    slave-read-only yes
    # User-supplied replica configuration:
    rename-command FLUSHDB ""
    rename-command FLUSHALL ""
    # End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: helm-redis-health
  namespace: xtgt
  labels:
    app.kubernetes.io/name: redis-db
    helm.sh/chart: redis-db-15.4.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: redis-db
data:
  ping_readiness_local.sh: |-
    #!/bin/bash

    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
    [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -h localhost \
        -p $REDIS_PORT \
        ping
    )
    if [ "$response" != "PONG" ]; then
      echo "$response"
      exit 1
    fi
  ping_liveness_local.sh: |-
    #!/bin/bash

    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
    [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -h localhost \
        -p $REDIS_PORT \
        ping
    )
    if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
      echo "$response"
      exit 1
    fi
  ping_readiness_master.sh: |-
    #!/bin/bash

    [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
    [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -h $REDIS_MASTER_HOST \
        -p $REDIS_MASTER_PORT_NUMBER \
        ping
    )
    if [ "$response" != "PONG" ]; then
      echo "$response"
      exit 1
    fi
  ping_liveness_master.sh: |-
    #!/bin/bash

    [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
    [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -h $REDIS_MASTER_HOST \
        -p $REDIS_MASTER_PORT_NUMBER \
        ping
    )
    if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
      echo "$response"
      exit 1
    fi
  ping_readiness_local_and_master.sh: |-
    script_dir="$(dirname "$0")"
    exit_status=0
    "$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
    "$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
    exit $exit_status
  ping_liveness_local_and_master.sh: |-
    script_dir="$(dirname "$0")"
    exit_status=0
    "$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
    "$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
    exit $exit_status
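
The two *_local_and_master.sh wrappers run both checks and propagate the worst exit code, and the liveness variants tolerate the LOADING reply so a node replaying a large AOF is not killed mid-recovery; the probe's timeout budget arrives as $1. A minimal debugging sketch (pod name assumed from the master StatefulSet further down):

# Run the rendered readiness check by hand with the same 1s budget the
# readinessProbe passes, and surface its exit code:
kubectl -n xtgt exec helm-redis-master-0 -- sh -c '/health/ping_readiness_local.sh 1; echo exit=$?'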
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: helm-redis-scripts
  namespace: xtgt
  labels:
    app.kubernetes.io/name: redis-db
    helm.sh/chart: redis-db-15.4.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: redis-db
data:
  start-master.sh: |
    #!/bin/bash

    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
    if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
      cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
    fi
    if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
      cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
    fi
    ARGS=("--port" "${REDIS_PORT}")
    ARGS+=("--requirepass" "${REDIS_PASSWORD}")
    ARGS+=("--masterauth" "${REDIS_PASSWORD}")
    ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
    ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
    exec redis-server "${ARGS[@]}"
  start-replica.sh: |
    #!/bin/bash

    get_port() {
        hostname="$1"
        type="$2"

        port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
        port=${!port_var}

        if [ -z "$port" ]; then
            case $type in
              "SENTINEL")
                echo 26379
                ;;
              "REDIS")
                echo 6379
                ;;
            esac
        else
            echo $port
        fi
    }

    get_full_hostname() {
        hostname="$1"
        echo "${hostname}.${HEADLESS_SERVICE}"
    }

    REDISPORT=$(get_port "$HOSTNAME" "REDIS")

    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
    [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
    if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
      cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
    fi
    if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
      cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
    fi

    echo "" >> /opt/bitnami/redis/etc/replica.conf
    echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
    echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
    ARGS=("--port" "${REDIS_PORT}")
    ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
    ARGS+=("--requirepass" "${REDIS_PASSWORD}")
    ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
    ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
    ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
    exec redis-server "${ARGS[@]}"
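
Two Bash features carry get_port: ${hostname^^} upper-cases the pod hostname, and ${!port_var} is indirect expansion, dereferencing the <SERVICE>_SERVICE_PORT_<PORTNAME> variable Kubernetes injects into every pod. A self-contained sketch (the variable and its value are assumed stand-ins):

# Assumed stand-in for a Kubernetes-injected service-port variable:
export STORAGE_1_SERVICE_PORT_REDIS=6379
hostname="storage-1"
port_var=$(echo "${hostname^^}_SERVICE_PORT_REDIS" | sed "s/-/_/g")  # -> STORAGE_1_SERVICE_PORT_REDIS
echo "${!port_var}"  # indirect expansion prints 6379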
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: helm-redis-headless
  namespace: xtgt
  labels:
    app.kubernetes.io/name: redis-db
    helm.sh/chart: redis-db-15.4.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: redis-db
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: tcp-redis
      port: 6379
      targetPort: redis
  selector:
    app.kubernetes.io/name: redis-db
    app.kubernetes.io/release: xtgt
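
Because clusterIP is None, this Service allocates no virtual IP: it only publishes DNS, including one per-pod name of the form <pod>.<service>.<namespace>.svc.cluster.local, which is exactly what get_full_hostname and replica-announce-ip build on. A quick check, a sketch run from any pod in the namespace:

# Expect one A record per ready Redis pod, e.g.
# helm-redis-master-0.helm-redis-headless.xtgt.svc.cluster.local
nslookup helm-redis-headless.xtgt.svc.cluster.local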
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: helm-redis-master
  namespace: xtgt
  labels:
    app.kubernetes.io/name: redis-db
    helm.sh/chart: redis-db-15.4.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: redis-db
    cmii.type: middleware
    cmii.app: redis
    app.kubernetes.io/component: master
spec:
  type: ClusterIP
  ports:
    - name: tcp-redis
      port: 6379
      targetPort: redis
      nodePort: null
  selector:
    app.kubernetes.io/name: redis-db
    app.kubernetes.io/release: xtgt
    cmii.type: middleware
    cmii.app: redis
    app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: helm-redis-replicas
  namespace: xtgt
  labels:
    app.kubernetes.io/name: redis-db
    helm.sh/chart: redis-db-15.4.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: redis-db
    app.kubernetes.io/component: replica
spec:
  type: ClusterIP
  ports:
    - name: tcp-redis
      port: 6379
      targetPort: redis
      nodePort: null
  selector:
    app.kubernetes.io/name: redis-db
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: helm-redis-master
  namespace: xtgt
  labels:
    app.kubernetes.io/name: redis-db
    helm.sh/chart: redis-db-15.4.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: redis-db
    cmii.type: middleware
    cmii.app: redis
    app.kubernetes.io/component: master
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: redis-db
      app.kubernetes.io/release: xtgt
      cmii.type: middleware
      cmii.app: redis
      app.kubernetes.io/component: master
  serviceName: helm-redis-headless
  updateStrategy:
    rollingUpdate: {}
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/name: redis-db
        helm.sh/chart: redis-db-15.4.1
        app.kubernetes.io/release: xtgt
        app.kubernetes.io/managed-by: redis-db
        cmii.type: middleware
        cmii.app: redis
        app.kubernetes.io/component: master
      annotations:
        checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
        checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
        checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
        checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: uavcloud.env
                    operator: In
                    values:
                      - "demo"
      securityContext:
        fsGroup: 1001
      serviceAccountName: helm-redis
      terminationGracePeriodSeconds: 30
      containers:
        - name: redis
          image: "172.32.12.34:8033/cmii/redis:6.2.6-debian-10-r0"
          imagePullPolicy: "Always"
          securityContext:
            runAsUser: 1001
          command:
            - /bin/bash
          args:
            - -c
            - /opt/bitnami/scripts/start-scripts/start-master.sh
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: REDIS_REPLICATION_MODE
              value: master
            - name: ALLOW_EMPTY_PASSWORD
              value: "no"
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: helm-redis
                  key: redis-password
            - name: REDIS_TLS_ENABLED
              value: "no"
            - name: REDIS_PORT
              value: "6379"
          ports:
            - name: redis
              containerPort: 6379
          livenessProbe:
            initialDelaySeconds: 20
            periodSeconds: 5
            # One second longer than command timeout should prevent generation of zombie processes.
            timeoutSeconds: 6
            successThreshold: 1
            failureThreshold: 5
            exec:
              command:
                - sh
                - -c
                - /health/ping_liveness_local.sh 5
          readinessProbe:
            initialDelaySeconds: 20
            periodSeconds: 5
            timeoutSeconds: 2
            successThreshold: 1
            failureThreshold: 5
            exec:
              command:
                - sh
                - -c
                - /health/ping_readiness_local.sh 1
          resources:
            limits:
              cpu: "2"
              memory: 8Gi
            requests:
              cpu: "1"
              memory: 4Gi
          volumeMounts:
            - name: start-scripts
              mountPath: /opt/bitnami/scripts/start-scripts
            - name: health
              mountPath: /health
            - name: redis-data
              mountPath: /data
            - name: config
              mountPath: /opt/bitnami/redis/mounted-etc
            - name: redis-tmp-conf
              mountPath: /opt/bitnami/redis/etc/
            - name: tmp
              mountPath: /tmp
      volumes:
        - name: start-scripts
          configMap:
            name: helm-redis-scripts
            defaultMode: 0755
        - name: health
          configMap:
            name: helm-redis-health
            defaultMode: 0755
        - name: config
          configMap:
            name: helm-redis-configuration
        - name: redis-tmp-conf
          emptyDir: {}
        - name: tmp
          emptyDir: {}
        - name: redis-data
          emptyDir: {}
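
Worth noting: redis-data is an emptyDir, so the AOF lives only as long as the pod stays on its node; appendonly yes protects against process restarts, not rescheduling. A sketch of the durable alternative (field placement and size are assumptions, not part of this chart's rendered output):

# Sketch only: swap the redis-data emptyDir for a per-pod claim on the
# StatefulSet spec so /data survives rescheduling.
volumeClaimTemplates:
  - metadata:
      name: redis-data          # replaces the redis-data emptyDir volume
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 8Gi          # assumed size; tune to AOF growth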
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: helm-redis-replicas
  namespace: xtgt
  labels:
    app.kubernetes.io/name: redis-db
    helm.sh/chart: redis-db-15.4.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: redis-db
    app.kubernetes.io/component: replica
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: redis-db
      app.kubernetes.io/release: xtgt
      app.kubernetes.io/component: replica
  serviceName: helm-redis-headless
  updateStrategy:
    rollingUpdate: {}
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/name: redis-db
        helm.sh/chart: redis-db-15.4.1
        app.kubernetes.io/release: xtgt
        app.kubernetes.io/managed-by: redis-db
        app.kubernetes.io/component: replica
      annotations:
        checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
        checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
        checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
        checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
    spec:
      securityContext:
        fsGroup: 1001
      serviceAccountName: helm-redis
      terminationGracePeriodSeconds: 30
      containers:
        - name: redis
          image: "172.32.12.34:8033/cmii/redis:6.2.6-debian-10-r0"
          imagePullPolicy: "Always"
          securityContext:
            runAsUser: 1001
          command:
            - /bin/bash
          args:
            - -c
            - /opt/bitnami/scripts/start-scripts/start-replica.sh
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: REDIS_REPLICATION_MODE
              value: slave
            - name: REDIS_MASTER_HOST
              value: helm-redis-master-0.helm-redis-headless.xtgt.svc.cluster.local
            - name: REDIS_MASTER_PORT_NUMBER
              value: "6379"
            - name: ALLOW_EMPTY_PASSWORD
              value: "no"
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: helm-redis
                  key: redis-password
            - name: REDIS_MASTER_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: helm-redis
                  key: redis-password
            - name: REDIS_TLS_ENABLED
              value: "no"
            - name: REDIS_PORT
              value: "6379"
          ports:
            - name: redis
              containerPort: 6379
          livenessProbe:
            initialDelaySeconds: 20
            periodSeconds: 5
            timeoutSeconds: 6
            successThreshold: 1
            failureThreshold: 5
            exec:
              command:
                - sh
                - -c
                - /health/ping_liveness_local_and_master.sh 5
          readinessProbe:
            initialDelaySeconds: 20
            periodSeconds: 5
            timeoutSeconds: 2
            successThreshold: 1
            failureThreshold: 5
            exec:
              command:
                - sh
                - -c
                - /health/ping_readiness_local_and_master.sh 1
          resources:
            limits:
              cpu: "2"
              memory: 8Gi
            requests:
              cpu: "1"
              memory: 4Gi
          volumeMounts:
            - name: start-scripts
              mountPath: /opt/bitnami/scripts/start-scripts
            - name: health
              mountPath: /health
            - name: redis-data
              mountPath: /data
            - name: config
              mountPath: /opt/bitnami/redis/mounted-etc
            - name: redis-tmp-conf
              mountPath: /opt/bitnami/redis/etc
      volumes:
        - name: start-scripts
          configMap:
            name: helm-redis-scripts
            defaultMode: 0755
        - name: health
          configMap:
            name: helm-redis-health
            defaultMode: 0755
        - name: config
          configMap:
            name: helm-redis-configuration
        - name: redis-tmp-conf
          emptyDir: {}
        - name: redis-data
          emptyDir: {}

NOTES:
[INFO] Applications should be deployed successfully !!!
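
A quick way to verify that claim right after install (namespace as rendered above):

kubectl -n xtgt get statefulsets,deployments,svc
kubectl -n xtgt get pods -o wide   # all pods should reach Running/Ready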
1237	3-湘潭钢铁项目/2-helm-chart/5-front.yaml	Normal file
File diff suppressed because it is too large	Load Diff
3711	3-湘潭钢铁项目/2-helm-chart/6-back.yaml	Normal file
File diff suppressed because it is too large	Load Diff
388	3-湘潭钢铁项目/2-helm-chart/7-srs.yaml	Normal file
@@ -0,0 +1,388 @@
---
# Source: outside-deploy/charts/srs-cluster/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: helm-srs-cm
  namespace: xtgt
  labels:
    cmii.app: video-live-srs
    cmii.type: middleware
    helm.sh/chart: srs-cluster-2.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "2.0.0"
data:
  srs.rtc.conf: |-
    listen 30935;
    max_connections 4096;
    srs_log_tank console;
    srs_log_file /home/srs.log;
    daemon off;
    http_api {
        enabled on;
        listen 30985;
        crossdomain on;
    }
    stats {
        network 0;
    }
    http_server {
        enabled on;
        listen 30080;
        dir /home/hls;
    }
    rtc_server {
        enabled on;
        listen 30090;
        candidate $CANDIDATE;
    }
    vhost __defaultVhost__ {
        http_hooks {
            enabled on;
            on_publish http://helm-srs-op-svc:8080/hooks/on_publish;
        }
        http_remux {
            enabled on;
        }
        rtc {
            enabled on;
            rtmp_to_rtc on;
            rtc_to_rtmp on;
            keep_bframe off;
        }
        tcp_nodelay on;
        min_latency on;
        play {
            gop_cache off;
            mw_latency 0;
            mw_msgs 0;
        }
        publish {
            firstpkt_timeout 4000;
            normal_timeout 2000;
            mr off;
        }
        dvr {
            enabled off;
            dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
            dvr_plan session;
        }
        hls {
            enabled on;
            hls_path /home/hls;
            hls_fragment 10;
            hls_window 60;
            hls_m3u8_file [app]/[stream].m3u8;
            hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
            hls_cleanup on;
        }
    }

  srs.op.conf: |-
    debug: false
    server:
      port: 8080

    spring:
      application:
        name: cmii-srs-operator
      platform:
        info:
          name: cmii-live-srs-operator
          description: cmii-live-srs-operator
          version: 1.2.0
          scanPackage: com.cmii.live
      datasource:
        type: com.alibaba.druid.pool.DruidDataSource
        url: jdbc:mysql://helm-mysql:3306/cmii_live_srs_op?characterEncoding=utf8&useSSL=false&serverTimezone=GMT%2B8
        username: k8s_admin
        password: fP#UaH6qQ3)8
        driver-class-name: com.mysql.cj.jdbc.Driver
      boot:
        admin:
          client:
            enabled: false
            url: http://127.0.0.1:8888
            instance:
              service-url: http://127.0.0.1:8080

    druid:
      mysql:
        usePingMethod: false

    management:
      endpoints:
        enabled-by-default: true
        web:
          exposure:
            include: '*'
      endpoint:
        health:
          show-details: always

    live:
      srs:
        rtmp-base: "rtmp://172.32.12.34:30935"
        rtsp-base: "rtsp://172.32.12.34:30554"
        srt-base: "srt://172.32.12.34:23333"
        flv-base: "http://172.32.12.34:30500"
        rtc-base: "webrtc://172.32.12.34:30500"
        api-base: "http://helm-srs-rtc-svc:30985"
      hls:
        max-ts: 200
        interval-ms: 6000
      minio:
        endpoint: http://172.32.12.38:9000
        access-key: cmii
        secret-key: B#923fC7mk
        bucket: srs-hls
      sync:
        onStart: false
        pool:
          core: 8
          max: 12
          queue: 0
          keepalive: 20
        interval:
          sync: 150
          elect: 8
          keepalive: 20
          heartbeat: 8

    logging:
      level:
        com.cmii.live.srs.mapper: info
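
With include: '*' and show-details: always, the operator's full actuator surface is reachable inside the cluster, which is convenient for an intranet delivery but worth narrowing for anything public-facing. A probe sketch (throwaway client pod; image is an assumption):

kubectl -n xtgt run probe --rm -it --restart=Never --image=curlimages/curl --command -- \
  curl -s http://helm-srs-op-svc:8080/actuator/health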
---
# Source: outside-deploy/charts/srs-cluster/templates/service.yaml
---

apiVersion: v1
kind: Service
metadata:
  name: srs-rtc
  namespace: xtgt
spec:
  type: ClusterIP
  clusterIP: None
  selector:
    srs-role: webrtc
  ports:
    - name: srsrtc-rtmp
      port: 30935
      targetPort: 30935
    - name: srsrtc-hls
      port: 30080
      targetPort: 30080
---
# Source: outside-deploy/charts/srs-cluster/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: helm-srs-rtc-svc
  namespace: xtgt
spec:
  type: NodePort
  selector:
    srs-role: webrtc
  ports:
    - name: srs-rtmp
      port: 30935
      targetPort: 30935
      nodePort: 30935
    - name: srs-api
      port: 30985
      targetPort: 30985
      nodePort: 30985
    - name: srs-rtc
      port: 30090
      targetPort: 30090
      nodePort: 30090
      protocol: UDP
    - name: srs-flv
      port: 30080
      targetPort: 30080
      nodePort: 30080
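
Each NodePort equals its container port, so the addresses in srs.rtc.conf, the Service, and the client all agree. A smoke-test sketch (ffmpeg/ffplay assumed available on the client; the node IP is the one used throughout this deployment):

# Publish a synthetic stream over the RTMP NodePort...
ffmpeg -re -f lavfi -i testsrc=size=640x480:rate=25 \
  -c:v libx264 -pix_fmt yuv420p -f flv rtmp://172.32.12.34:30935/live/smoke
# ...then pull it back as HTTP-FLV via the srs-flv NodePort:
ffplay http://172.32.12.34:30080/live/smoke.flv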
---
# Source: outside-deploy/charts/srs-cluster/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: helm-srs-op-svc
  namespace: xtgt
spec:
  type: ClusterIP
  selector:
    srs-role: op
  ports:
    - port: 8080
      targetPort: 8080
---
# Source: outside-deploy/charts/srs-cluster/templates/operator-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: helm-srs-op
  namespace: xtgt
  labels:
    srs-role: op
    cmii.app: video-live-srs
    cmii.type: middleware
    helm.sh/chart: srs-cluster-2.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "2.0.0"
spec:
  replicas: 1
  selector:
    matchLabels:
      srs-role: op
  template:
    metadata:
      labels:
        srs-role: op
        cmii.app: video-live-srs
        cmii.type: middleware
        helm.sh/chart: srs-cluster-2.1.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/version: "2.0.0"
    spec:
      imagePullSecrets:
        - name: harborsecret
      affinity: {}
      containers:
        - name: operator
          image: "172.32.12.34:8033/cmii/cmii-srs-operator:v1.0.0"
          imagePullPolicy: Always
          resources:
            limits:
              memory: 4Gi
              cpu: 4800m
            requests:
              memory: 256Mi
              cpu: 100m
          ports:
            - name: operator
              containerPort: 8080
              protocol: TCP
          volumeMounts:
            - name: srs-conf-file
              mountPath: /cmii/application.yaml
              subPath: application.yaml
      volumes:
        - name: srs-conf-file
          configMap:
            name: "helm-srs-cm"
            items:
              - key: "srs.op.conf"
                path: "application.yaml"
---
# Source: outside-deploy/charts/srs-cluster/templates/webrtc-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: helm-srs-rtc
  namespace: xtgt
  labels:
    srs-role: webrtc
    cmii.app: video-live-srs
    cmii.type: middleware
    helm.sh/chart: srs-cluster-2.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "2.0.0"
spec:
  serviceName: srsrtc
  replicas: 1
  selector:
    matchLabels:
      srs-role: webrtc
  template:
    metadata:
      labels:
        srs-role: webrtc
        cmii.app: video-live-srs
        cmii.type: middleware
        helm.sh/chart: srs-cluster-2.1.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/version: "2.0.0"
    spec:
      imagePullSecrets:
        - name: harborsecret
      affinity: {}
      containers:
        - name: helm-srs
          image: "172.32.12.34:8033/cmii/ossrs/srs:v4.0-b9"
          resources:
            limits:
              memory: 4Gi
              cpu: 1200m
            requests:
              memory: 256Mi
              cpu: 100m
          ports:
            - name: srs-rtmp
              containerPort: 30935
              protocol: TCP
            - name: srs-api
              containerPort: 30985
              protocol: TCP
            - name: srs-flv
              containerPort: 30080
              protocol: TCP
            - name: srs-webrtc
              containerPort: 30090
              protocol: UDP
          env:
            - name: CANDIDATE
              value: "172.32.12.34"
          volumeMounts:
            - name: srs-conf-file
              mountPath: /usr/local/srs/conf/docker.conf
              subPath: docker.conf
            - name: srs-vol
              mountPath: /home/dvr
              subPath: "default/helm-srs/dvr"
            - name: srs-vol
              mountPath: /home/hls
              subPath: "default/helm-srs/hls"
        - name: oss-adaptor
          image: "172.32.12.34:8033/cmii/cmii-srs-oss-adaptor:v1.0.0-no-retention"
          imagePullPolicy: Always
          resources:
            limits:
              memory: 4Gi
              cpu: 1200m
            requests:
              memory: 256Mi
              cpu: 100m
          env:
            - name: OSS_ENDPOINT
              value: http://172.32.12.38:9000
            - name: OSS_AK
              value: ossuser
            - name: OSS_SK
              value: B#923fC7mk
            - name: OSS_BUCKET
              value: srs-hls
            - name: SRS_OP
              value: "http://helm-srs-op-svc:8080"
            - name: MYSQL_ENDPOINT
              value: helm-mysql:3306
            - name: MYSQL_USERNAME
              value: k8s_admin
            - name: MYSQL_PASSWORD
              value: fP#UaH6qQ3)8
          volumeMounts:
            - name: srs-vol
              mountPath: /cmii/share/hls
              subPath: default/helm-srs/hls
      volumes:
        - name: srs-conf-file
          configMap:
            name: "helm-srs-cm"
            items:
              - key: "srs.rtc.conf"
                path: "docker.conf"
        - name: srs-vol
          emptyDir:
            sizeLimit: 10Gi
28	3-湘潭钢铁项目/2-helm-chart/8-mysql-service.yaml	Normal file
@@ -0,0 +1,28 @@
apiVersion: v1
kind: Service
metadata:
  name: cmii-mysql
  namespace: xtgt
  labels:
    app.kubernetes.io/name: mysql-db
    helm.sh/chart: mysql-db-8.8.1
    app.kubernetes.io/release: xtgt
    app.kubernetes.io/managed-by: mysql-db
    cmii.type: middleware
    cmii.app: mysql
    app.kubernetes.io/component: primary
  annotations: {}
spec:
  type: ClusterIP
  ports:
    - name: mysql
      port: 13306
      protocol: TCP
      targetPort: mysql
      nodePort: null
  selector:
    app.kubernetes.io/name: mysql-db
    app.kubernetes.io/release: xtgt
    cmii.type: middleware
    cmii.app: mysql
    app.kubernetes.io/component: primary
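
Note the Service listens on 13306 rather than the MySQL default 3306 and forwards to the pod's named mysql port. A connectivity sketch (client image is an assumption; the k8s_admin credentials are the ones the srs.op.conf datasource above already uses):

kubectl -n xtgt run mysql-client --rm -it --restart=Never --image=mysql:8.0 --command -- \
  mysql -h cmii-mysql -P 13306 -u k8s_admin -p cmii_live_srs_op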
19	3-湘潭钢铁项目/2-helm-chart/Chart.yaml	Normal file
@@ -0,0 +1,19 @@
apiVersion: v2

name: outside-deploy

description: Standardized charts for external delivery of the China Mobile Lingyun platform. Provides one-click deployment of the business applications (frontend, backend) and of the middleware (mysql, redis, minio, etc.)

deprecated: true

type: application

version: 1.1.0

appVersion: 2.2.2

kubeVersion: ^1.16.0-0

maintainers:
  - name: super wdd
    email: wangziwen@cmii.chinamobile.com
24	3-湘潭钢铁项目/2-helm-chart/charts/all-ingress-config/.helmignore	Normal file
@@ -0,0 +1,24 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
ci/*
30	3-湘潭钢铁项目/2-helm-chart/charts/all-ingress-config/Chart.yaml	Normal file
@@ -0,0 +1,30 @@
apiVersion: v2
name: all-ingress-config
description: All Ingress configuration for the China Mobile Lingyun platform, covering the frontend, backend, Api/Gateway, etc.

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 2.2.2

keywords:
  - uavcloud
  - ingress
  - template
  - function
  - chinamobile
@@ -0,0 +1,33 @@
{{- if or .Values.frontendApplication.enabled .Values.backendApplication.enabled .Values.apiGatewayApplication.enabled }}
{{- if .Values.frontendApplication.enabled }}
-- 已经开启前端应用的Ingress!
-- The Ingress of the front-end application has been opened!
{{- end }}
{{- if .Values.backendApplication.enabled }}
-- 已经开启后端应用的Ingress!
-- The Ingress of the back-end application has been opened!
{{- end }}
{{- if .Values.apiGatewayApplication.enabled }}
-- 已经开启Api和Gateway应用的Ingress!
-- Ingress of Api and Gateway applications has been opened!
{{- end }}
{{- else }}
[ERROR] 您并没有选择开启任何应用的Ingress!仅仅更新ConfigMap!!
[ERROR] You did not choose to open Ingress for any app! Just updating the ConfigMap!!
{{- end }}
{{ if .Values.global.domain.IsPrivateDeployment }}
-- 您选择了【私有化】域名部署!!
-- [INFO] 请注意域名中不包含前缀!
-- 当前中移凌云的主页访问地址为:{{- if .Values.global.ingress.tls_enabled -}}https://{{- else -}}http://{{- end -}}{{ .Values.global.domain.DeployDomainName }}/
-- You have chosen [Private] domain name deployment!!
-- [INFO] Please note that the domain name does not contain a prefix!
-- The current homepage access address of China Mobile Lingyun is:{{- if .Values.global.ingress.tls_enabled -}}https://{{- else -}}http://{{- end -}}{{ .Values.global.domain.DeployDomainName }}/
{{- else }}
-- 当前部署的租户环境为:{{ .Values.global.domain.TenantEnvironment }}
-- 当前中移凌云的主页访问地址为:{{- if .Values.global.ingress.tls_enabled -}}https://{{- else -}}http://{{- end -}}{{ .Values.global.domain.DeployDomainName }}/{{ .Values.global.domain.TenantEnvironment }}/
-- The currently deployed tenant environment is: {{ .Values.global.domain.TenantEnvironment }}
-- The current homepage access address of China Mobile Lingyun is:{{- if .Values.global.ingress.tls_enabled -}}https://{{- else -}}http://{{- end -}}{{ .Values.global.domain.DeployDomainName }}/{{ .Values.global.domain.TenantEnvironment }}/
{{- end }}

[SUCCESS] Ingress Deployment has been accomplished !!!
@@ -0,0 +1,50 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Create chart name and version as used by the chart label.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "all-ingress.name" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 }}
{{- end }}


{{/*
Create the tls configuration for https to enable traefik-ingress
*/}}
{{- define "all-ingress-front.full.applications" -}}
- cmii-uav-platform
- cmii-uav-platform-ai-brain
- cmii-uav-platform-hyperspectral
- cmii-uav-platform-mws
- cmii-uav-platform-oms
- cmii-uav-platform-open
- cmii-uav-platform-splice
- cmii-uav-platform-splice-visual
{{- end }}

{{/*
Common annotations of frontend applications
*/}}
{{- define "all-ingress.frontend.commom.annotations" -}}
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
{{- end }}

{{/*
Specific annotations created for api applications
*/}}
{{- define "all-ingress.all-apis.commom.annotations" -}}
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$2
{{- end }}

{{/*
The specific annotations for project-minio
*/}}
{{/*{{- define "uavcloud-ingress.apiGateway.project-minio.annotations" -}}*/}}
{{/*kubernetes.io/ingress.class: "nginx"*/}}
{{/*nginx.ingress.kubernetes.io/enable-cors: "true"*/}}
{{/*nginx.ingress.kubernetes.io/rewrite-target: /api/uav/minio/$2*/}}
{{/*{{- end }}*/}}
@@ -0,0 +1,51 @@
{{- if .Values.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $TenantEnvironment := .Values.global.domain.TenantEnvironment -}}
{{- $DeployDomainName := .Values.global.domain.DeployDomainName -}}
{{- $IsPrivateDeployment := .Values.global.domain.IsPrivateDeployment -}}
{{- range $applicationName, $values := .Values.frontendApplication.manifest }}
---
kind: ConfigMap
apiVersion: v1
metadata:
{{- if eq $values.shortname "" }}
  name: tenant-prefix-platform
{{- else }}
  name: tenant-prefix-{{ $values.shortname }}
{{- end }}
  namespace: {{ $namespace }}
data:
  ingress-config.js: |-
    // injected from ConfigMap
    var __GlobalIngressConfig = {
    {{- if $IsPrivateDeployment }}
    TenantEnvironment: "",
    {{- else }}
    TenantEnvironment: {{ $TenantEnvironment | quote -}},
    {{- end }}
    CloudHOST: {{ $DeployDomainName | quote -}},
    {{- if not (contains "appli" $values.shortname ) }}
    {{- /* short name does not contain "application"; handle the special cases */}}
    {{- if contains "cms-portal" $values.shortname }}
    ApplicationShortName: "cmsportal",
    {{- else if contains "-rescue" $values.shortname }}
    ApplicationShortName: {{ trimSuffix "-rescue" $values.shortname | quote -}},
    {{- else if contains "screen" $values.shortname }}
    ApplicationShortName: {{ trimSuffix "-screen" $values.shortname | quote -}},
    {{- else }}
    ApplicationShortName: {{ $values.shortname | quote }},
    {{- end }}
    {{- else }}
    ApplicationShortName: {{ trimSuffix "-application" $values.shortname | quote -}},
    {{- end }}
    {{- range $appShortNamePlusTenantEnv, $appClientId := $values}}
    {{- $realApplicationNamePlusTenantEnv := cat $values.shortname $TenantEnvironment | replace " " "-" }}
    {{- if hasPrefix $appShortNamePlusTenantEnv $realApplicationNamePlusTenantEnv }}
    AppClientId: {{ $appClientId | quote }}
    {{- end }}
    {{- end }}
    }
---
{{- end }}
{{- end }}
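A sketch for inspecting one rendered prefix ConfigMap after install (the name tenant-prefix-oms follows from shortname "oms" in the values file below):

kubectl -n xtgt get configmap tenant-prefix-oms \
  -o jsonpath='{.data.ingress-config\.js}'
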
@@ -0,0 +1,215 @@
{{- if and .Values.enabled .Values.apiGatewayApplication.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $TenantEnvironment := .Values.global.domain.TenantEnvironment -}}
{{- $DeployDomainName := ternary (first (regexSplit ":" .Values.global.domain.DeployDomainName -1)) ( .Values.global.domain.DeployDomainName ) (contains ":" .Values.global.domain.DeployDomainName) -}}
{{- $IsPrivateDeployment := .Values.global.domain.IsPrivateDeployment -}}
{{- $IsTlsEnables := .Values.global.ingress.tls_enabled -}}
{{- $scope := $ -}}
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: all-gateways-ingress
  namespace: {{ $namespace }}
  labels:
    type: {{ $.Values.apiGatewayApplication.type }}
    accessmode: {{ $.Values.apiGatewayApplication.accessmode }}
    helm.sh/chart: {{ include "all-ingress.name" $scope }}
    app.kubernetes.io/managed-by: {{ $.Release.Service }}
    {{- if $.Values.global.image.tag }}
    app.kubernetes.io/version: {{ $.Values.global.image.tag | quote }}
    {{- end }}
  annotations:
    {{- include "all-ingress.frontend.commom.annotations" $scope | nindent 4 }}
    {{- if $IsTlsEnables }}
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/permanent-redirect-code: '301'
    {{- end }}
    nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection "Upgrade";
spec:
  rules:
    - host: {{ $DeployDomainName }}
      http:
        paths:
          {{- if $IsPrivateDeployment }}
          {{- range $key,$value := .Values.apiGatewayApplication.manifest }}
          {{- if eq $value false }}
          {{- $applicationName := ternary (trimPrefix "cmii-uav-" $key) (trimPrefix "cmii-" $key) (contains "cmii-uav" $key) -}}
          {{- if eq $applicationName "material-warehouse" }}
          - path: /api/warehouses/?(.*)
          {{- else if eq $applicationName "gateway" }}
          - path: /api/?(.*)
          {{- else if eq $applicationName "admin-gateway" }}
          - path: /oms/api/?(.*)
          {{- /* {{- else if eq $applicationName "project-minio" }}*/}}
          {{- /* - path: /?(.*)/api/minios/?(.*)*/}}
          {{- else if eq $applicationName "open-gateway" }}
          - path: /open/api/?(.*)
          {{- else }}
          - path: /{{ $applicationName }}/?(.*)
          {{- end }}
            pathType: ImplementationSpecific
            backend:
              serviceName: {{ $key }}
              servicePort: 8080
          {{- end }}
          {{- end }}
          {{- else }}
          {{- range $key,$value := .Values.apiGatewayApplication.manifest }}
          {{- if eq $value false }}
          {{- $applicationName := ternary (trimPrefix "cmii-uav-" $key) (trimPrefix "cmii-" $key) (contains "cmii-uav" $key) -}}
          {{- if eq $applicationName "material-warehouse" }}
          - path: /{{ $TenantEnvironment }}/api/warehouses/?(.*)
          {{- else if eq $applicationName "gateway" }}
          - path: /{{ $TenantEnvironment }}/api/?(.*)
          {{- else if eq $applicationName "admin-gateway" }}
          - path: /{{ $TenantEnvironment }}/oms/api/?(.*)
          {{- /* {{- else if eq $applicationName "project-minio" }}*/}}
          {{- /* - path: /{{ $TenantEnvironment }}/?(.*)/api/minios/?(.*)*/}}
          {{- else if eq $applicationName "open-gateway" }}
          - path: /{{ $TenantEnvironment }}/open/api/?(.*)
          {{- else }}
          - path: /{{ $applicationName }}/?(.*)
          {{- end }}
            pathType: ImplementationSpecific
            backend:
              serviceName: {{ $key }}
              servicePort: 8080
          {{- end }}
          {{- end }}
          {{- end }}
  {{- if $IsTlsEnables }}
  tls:
    - hosts:
        - {{ $DeployDomainName }}
      secretName: {{ $DeployDomainName | quote }}
  {{- end }}

---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: all-apis-ingress
  namespace: {{ $namespace }}
  labels:
    type: {{ $.Values.apiGatewayApplication.type }}
    accessmode: {{ $.Values.apiGatewayApplication.accessmode }}
    helm.sh/chart: {{ include "all-ingress.name" $scope }}
    app.kubernetes.io/managed-by: {{ $.Release.Service }}
    {{- if $.Values.global.image.tag }}
    app.kubernetes.io/version: {{ $.Values.global.image.tag | quote }}
    {{- end }}
  annotations:
    {{- include "all-ingress.all-apis.commom.annotations" $scope | nindent 4 }}
    {{- if $IsTlsEnables }}
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/permanent-redirect-code: '301'
    {{- end }}
    nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection "Upgrade";
spec:
  rules:
    - host: {{ $DeployDomainName }}
      http:
        paths:
          {{- if $IsPrivateDeployment }}
          {{- range $key,$value := .Values.apiGatewayApplication.manifest }}
          {{- $applicationName := ternary (trimPrefix "cmii-uav-" $key) (trimPrefix "cmii-" $key) (contains "cmii-uav" $key) -}}
          {{- if $value }}
          {{- if eq $applicationName "material-warehouse" }}
          - path: /?(.*)/api/warehouses/?(.*)
          {{- else if eq $applicationName "gateway" }}
          - path: /api/?(.*)
          {{- else if eq $applicationName "admin-gateway" }}
          - path: /oms/api/?(.*)
          {{- /* {{- else if eq $applicationName "project-minio" }}*/}}
          {{- /* - path: /?(.*)/api/minios/?(.*)*/}}
          {{- else if eq $applicationName "open-gateway" }}
          - path: /open/api/?(.*)
          {{- else }}
          - path: /{{ $applicationName }}/?(.*)
          {{- end }}
            pathType: ImplementationSpecific
            backend:
              serviceName: {{ $key }}
              servicePort: 8080
          {{- end }}
          {{- end }}
          {{- else }}
          {{- range $key,$value := .Values.apiGatewayApplication.manifest }}
          {{- if $value }}
          {{- $applicationName := ternary (trimPrefix "cmii-uav-" $key) (trimPrefix "cmii-" $key) (contains "cmii-uav" $key) -}}
          {{- if eq $applicationName "material-warehouse" }}
          - path: /{{ $TenantEnvironment }}/?(.*)/api/warehouses/?(.*)
          {{- else if eq $applicationName "gateway" }}
          - path: /{{ $TenantEnvironment }}/api/?(.*)
          {{- else if eq $applicationName "admin-gateway" }}
          - path: /{{ $TenantEnvironment }}/oms/api/?(.*)
          {{- /* {{- else if eq $applicationName "project-minio" }}*/}}
          {{- /* - path: /{{ $TenantEnvironment }}/?(.*)/api/minios/?(.*)*/}}
          {{- else if eq $applicationName "open-gateway" }}
          - path: /{{ $TenantEnvironment }}/open/api/?(.*)
          {{- else }}
          - path: /{{ $applicationName }}/?(.*)
          {{- end }}
            pathType: ImplementationSpecific
            backend:
              serviceName: {{ $key }}
              servicePort: 8080
          {{- end }}
          {{- end }}
          {{- end }}
  {{- if $IsTlsEnables }}
  tls:
    - hosts:
        - {{ $DeployDomainName }}
      secretName: {{ $DeployDomainName | quote }}
  {{- end }}
---
{{- end }}
{{/*apiVersion: networking.k8s.io/v1beta1*/}}
{{/*kind: Ingress*/}}
{{/*metadata:*/}}
{{/*  name: project-minio-ingress*/}}
{{/*  namespace: {{ $namespace }}*/}}
{{/*  labels:*/}}
{{/*    type: {{ $.Values.apiGatewayApplication.type }}*/}}
{{/*    accessmode: {{ $.Values.apiGatewayApplication.accessmode }}*/}}
{{/*    helm.sh/chart: {{ include "uavcloud-ingress.name" $scope }}*/}}
{{/*    app.kubernetes.io/managed-by: {{ $.Release.Service }}*/}}
{{/*    {{- if $.Values.global.image.tag }}*/}}
{{/*    app.kubernetes.io/version: {{ $.Values.global.image.tag | quote }}*/}}
{{/*    {{- end }}*/}}
{{/*  annotations:*/}}
{{/*    {{- include "uavcloud-ingress.apiGateway.project-minio.annotations" $scope | nindent 4 }}*/}}
{{/*    {{- if $IsTlsEnables }}*/}}
{{/*    nginx.ingress.kubernetes.io/ssl-redirect: "true"*/}}
{{/*    nginx.ingress.kubernetes.io/permanent-redirect-code: '301'*/}}
{{/*    {{- end }}*/}}
{{/*    nginx.ingress.kubernetes.io/configuration-snippet: |*/}}
{{/*      proxy_set_header Upgrade $http_upgrade;*/}}
{{/*      proxy_set_header Connection "Upgrade";*/}}
{{/*spec:*/}}
{{/*  rules:*/}}
{{/*    - host: {{ $DeployDomainName }}*/}}
{{/*      http:*/}}
{{/*        paths:*/}}
{{/*          {{- if $IsPrivateDeployment }}*/}}
{{/*          - path: /?(.*)/api/minio/?(.*)*/}}
{{/*          {{- else }}*/}}
{{/*          - path: /{{ $TenantEnvironment }}/?(.*)/api/minio/?(.*)*/}}
{{/*          {{- end }}*/}}
{{/*            pathType: ImplementationSpecific*/}}
{{/*            backend:*/}}
{{/*              serviceName: cmii-project-minio*/}}
{{/*              servicePort: 8080*/}}
{{/*          {{- if $IsTlsEnables }}*/}}
{{/*          tls:*/}}
{{/*            - hosts:*/}}
{{/*                - {{ $DeployDomainName }}*/}}
{{/*              secretName: {{ $DeployDomainName | quote }}*/}}
{{/*          {{- end }}*/}}
{{/*---*/}}
@@ -0,0 +1,35 @@
{{- if and .Values.enabled .Values.backendApplication.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $TenantEnvironment := .Values.global.domain.TenantEnvironment -}}
{{- $scope := $ -}}
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: backend-applications-ingress
  namespace: {{ $namespace }}
  labels:
    type: {{ .Values.backendApplication.type }}
    accessmode: {{ $.Values.backendApplication.accessmode }}
    helm.sh/chart: {{ include "all-ingress.name" $scope }}
    app.kubernetes.io/managed-by: {{ $.Release.Service }}
    {{- if $.Values.global.image.tag }}
    app.kubernetes.io/version: {{ $.Values.global.image.tag | quote }}
    {{- end }}
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
  rules:
    {{- range $key,$value := .Values.backendApplication.manifest }}
    {{- $applicationName := $key | trunc 63 }}
    - host: {{ $applicationName }}.uavcloud-{{ $TenantEnvironment }}.io
      http:
        paths:
          - path: /
            backend:
              serviceName: {{ $applicationName }}
              servicePort: 8080
    {{- end }}
---
{{- end }}
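Each backend entry renders an internal-only virtual host such as cmii-uav-device.uavcloud-outside.io with the default tenant. A sketch for hitting one without DNS (<ingress-ip> is the assumed address of the nginx ingress controller, environment-specific):

curl -H 'Host: cmii-uav-device.uavcloud-outside.io' http://<ingress-ip>/
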
@@ -0,0 +1,121 @@
{{- if and .Values.enabled .Values.frontendApplication.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $TenantEnvironment := .Values.global.domain.TenantEnvironment -}}
{{- $DeployDomainName := ternary (first (regexSplit ":" .Values.global.domain.DeployDomainName -1)) ( .Values.global.domain.DeployDomainName ) (contains ":" .Values.global.domain.DeployDomainName) -}}
{{- $IsPrivateDeployment := .Values.global.domain.IsPrivateDeployment -}}
{{- $scope := $ -}}
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: frontend-applications-ingress
  namespace: {{ $namespace }}
  labels:
    type: {{ .Values.frontendApplication.type }}
    accessmode: {{ $.Values.frontendApplication.accessmode }}
    helm.sh/chart: {{ include "all-ingress.name" $scope }}
    app.kubernetes.io/managed-by: {{ $.Release.Service }}
    {{- if $.Values.global.image.tag }}
    app.kubernetes.io/version: {{ $.Values.global.image.tag | quote }}
    {{- end }}
  annotations:
    {{- include "all-ingress.frontend.commom.annotations" $scope | nindent 4 }}
    {{- if .Values.global.ingress.tls_enabled }}
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/permanent-redirect-code: '301'
    {{- end }}
    nginx.ingress.kubernetes.io/configuration-snippet: |
      {{- range $applicationName, $values := .Values.frontendApplication.manifest }}
      {{- if $IsPrivateDeployment }}
      {{- if eq $values.shortname "" }}
      {{- /* main-domain case: domain rework */}}
      rewrite ^(/green)$ $1/ redirect;
      rewrite ^(/supervision)$ $1/ redirect;
      rewrite ^(/inspection)$ $1/ redirect;
      rewrite ^(/pangu)$ $1/ redirect;
      {{- /* main-domain case: domain rework, end */}}
      {{- else if not (contains "appli" $values.shortname) }}
      {{- /* special short-name cases */}}
      {{- /* short name does not contain "application"; handle the special cases */}}
      {{- if contains "-portal" $values.shortname}}
      rewrite ^(/cmsportal)$ $1/ redirect;
      {{- else if contains "-rescue" $values.shortname }}
      rewrite ^(/{{ trimSuffix "-rescue" $values.shortname }})$ $1/ redirect;
      {{- else if contains "screen" $values.shortname }}
      rewrite ^(/{{ trimSuffix "-screen" $values.shortname }})$ $1/ redirect;
      {{- else }}
      {{- /* every short name without a special rule ends up here */}}
      rewrite ^(/{{ $values.shortname }})$ $1/ redirect;
      {{- end }}
      {{- else if (contains "appli" $values.shortname) }}
      rewrite ^(/{{ trimSuffix "-application" $values.shortname }})$ $1/ redirect;
      {{- else }}
      {{- /* fallback branch, kept just in case */}}
      rewrite ^(/{{ $values.shortname }})$ $1/ redirect;
      {{- end }}
      {{- else }}
      {{- if eq $values.shortname "" }}
      rewrite ^(/{{- $TenantEnvironment -}})$ $1/ redirect;
      {{- else }}
      rewrite ^(/{{- cat $TenantEnvironment $values.shortname | replace " " "/" -}})$ $1/ redirect;
      {{- end }}
      {{- end }}
      {{- end }}
spec:
  rules:
    - host: {{ $DeployDomainName }}
      http:
        paths:
          {{- /* domain rework, added for demo */}}
          - path: /inspection/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform
              servicePort: 9528
          - path: /supervision/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform
              servicePort: 9528
          - path: /green/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform
              servicePort: 9528
          - path: /pangu/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform
              servicePort: 9528
          {{- /* domain rework, end */}}
          {{- range $applicationName, $values := .Values.frontendApplication.manifest }}
          {{- if $IsPrivateDeployment }}
          {{- if eq $values.shortname ""}}
          - path: /?(.*)
          {{- else if (contains "appli" $values.shortname) }}
          - path: /{{ trimSuffix "-application" $values.shortname }}/?(.*)
          {{- else }}
          - path: /{{ $values.shortname }}/?(.*)
          {{- end }}
          {{- else }}
          {{- if eq $values.shortname ""}}
          - path: /{{ $TenantEnvironment }}/?(.*)
          {{- else if not (contains "appli" $values.shortname) }}
          - path: /{{ $TenantEnvironment }}/{{ trimSuffix "-application" $values.shortname }}/?(.*)
          {{- else }}
          - path: /{{- cat $TenantEnvironment $values.shortname | replace " " "/" -}}/?(.*)
          {{- end }}
          {{- end }}
            pathType: ImplementationSpecific
            backend:
              serviceName: {{ $applicationName }}
              servicePort: 9528
          {{- end }}
  {{- if .Values.global.ingress.tls_enabled }}
  tls:
    - hosts:
        - {{ $DeployDomainName }}
      secretName: {{ $DeployDomainName | quote }}
  {{- end }}
{{- end }}
156	3-湘潭钢铁项目/2-helm-chart/charts/all-ingress-config/values.yaml	Normal file
@@ -0,0 +1,156 @@
# Default values for uavcloud-ingress-core.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
  domain:
    DeployDomainName: "lab.uavcmlc.com:31500" # will be overridden by upper values
    TenantEnvironment: "outside" # please don't modify this, unless you know what you are doing
    IsPrivateDeployment: true # please don't modify this, unless you know what you are doing
  ingress:
    tls_enabled: false

enabled: true

frontendApplication:
  enabled: true
  type: frontend
  accessmode: public
  manifest:
    # AppFullName vs AppShortName for frontend applications
    cmii-uav-platform:
      shortname: ""
    cmii-uav-platform-ai-brain:
      shortname: "ai-brain"
      ai-brain-dev: APP_i6mlNKODBo42OIVn
      ai-brain-test: APP_0BF17ayVaSpY89O4
      ai-brain-feature: APP_0BF17ayVaSpY89O4
      ai-brain-devbase: APP_0BF17ayVaSpY89O4
      ai-brain-uat: APP_kZeiSXxg9qZxj6ue
      ai-brain-demo: APP_rafnuCAmBESIVYMH
      ai-brain-outside: APP_rafnuCAmBESIVYMH # this is what the outside environment actually uses, copied from demo
    cmii-uav-platform-hyperspectral:
      shortname: "hyper"
      hyper-dev: APP_CN713PC4qwViGj1d
      hyper-test: APP_xtN9XF2L1J4IRHaB
      hyper-devbase: APP_xtN9XF2L1J4IRHaB
      hyper-feature: APP_xtN9XF2L1J4IRHaB
      hyper-uat: APP_OT4l1kYrzWT4tiif
      hyper-demo: APP_xbMkKdsbsbv8SH4w
      hyper-outside: APP_xbMkKdsbsbv8SH4w # this is what the outside environment actually uses, copied from demo
    cmii-uav-platform-mws:
      shortname: "mws"
      mws-dev: APP_4lVSVI0ZGxTssir8
      mws-test: APP_MEeBJHp1fSVD1Wuw
      mws-devbase: APP_MEeBJHp1fSVD1Wuw
      mws-feature: APP_MEeBJHp1fSVD1Wuw
      mws-uat: APP_U4GEiHutGQL5prSP
      mws-demo: APP_uKniXPELlRERBBwK
      mws-outside: APP_uKniXPELlRERBBwK # this is what the outside environment actually uses, copied from demo
    cmii-uav-platform-mws-admin:
      shortname: "mws-admin"
    cmii-uav-platform-oms:
      shortname: "oms"
    cmii-uav-platform-cms:
      shortname: "cms"
    cmii-uav-platform-cms-portal:
      shortname: "cmsportal"
    cmii-uav-platform-open:
      shortname: "open"
    cmii-uav-platform-splice:
      shortname: "splice"
      splice-dev: APP_bYdlPsBBIncZdaYR
      splice-test: APP_l4HIMixfIXhlCTi9
      splice-devbase: APP_l4HIMixfIXhlCTi9
      splice-feature: APP_l4HIMixfIXhlCTi9
      splice-uat: APP
      splice-demo: APP_zE0M3sTRXrCIJS8Y
      splice-outside: APP_zE0M3sTRXrCIJS8Y # this is what the outside environment actually uses, copied from demo
    cmii-uav-platform-splice-visual:
      shortname: "splice-visual"
    cmii-uav-platform-detection:
      shortname: "detection"
      detection-demo: APP_FDHW2VLVDWPnnOCy
      detection-outside: APP_FDHW2VLVDWPnnOCy # this is what the outside environment actually uses, copied from demo
    cmii-uav-platform-security:
      shortname: "security"
      security-demo: APP_JUSEMc7afyWXxvE7
      security-outside: APP_JUSEMc7afyWXxvE7 # this is what the outside environment actually uses, copied from demo
    cmii-uav-platform-visualization:
      shortname: "visualization"
      visualization-demo: APP_Jc8i2wOQ1t73QEJS
      visualization-outside: APP_Jc8i2wOQ1t73QEJS # this is what the outside environment actually uses, copied from demo
    cmii-uav-platform-logistics:
      shortname: "logistics"
      logistics-demo: APP_PvdfRRRBPL8xbIwl
      logistics-outside: APP_PvdfRRRBPL8xbIwl
    cmii-uav-platform-share:
      shortname: "share"
      share-demo: APP_4lVSVI0ZGxTssir8
      share-outside: APP_4lVSVI0ZGxTssir8
    cmii-uav-platform-base:
      shortname: "base"
      base-demo: APP_9LY41OaKSqk2btY0
      base-outside: APP_9LY41OaKSqk2btY0 # this is what the outside environment actually uses, copied from demo
    cmii-uav-platform-traffic-screen:
      shortname: "traffic"
      traffic-demo: APP_PvdfRRRBPL8xbIwl
      traffic-outside: APP_PvdfRRRBPL8xbIwl
    cmii-uav-platform-emergency-rescue:
      shortname: "emergency"
      emergency-demo: APP_aGsTAY1uMZrpKdfk
      emergency-outside: APP_aGsTAY1uMZrpKdfk


backendApplication:
  enabled: true
  type: backend
  # this ingress is for swagger and CI/CD URLs; they can be accessed only from the internal network
  accessmode: internal
  manifest:
    # all backend applications
    cmii-admin-data: false
    cmii-admin-user: false
    cmii-uav-airspace: false
    cmii-uav-brain: false
    cmii-uav-clusters: false
    cmii-uav-data-post-process: false
    cmii-uav-developer: false
    cmii-uav-device: false
    cmii-uav-kpi-monitor: false
    cmii-uav-live: false
    cmii-uav-logger: false
    cmii-uav-mission: false
    cmii-uav-monitor: false
    cmii-uav-mqtthandler: false
    cmii-uav-notice: false
    cmii-uav-oauth: false
    cmii-uav-process: false
    cmii-uav-security-system: false
    cmii-uav-surveillance: false
    cmii-uav-user: false
    cmii-uav-waypoint: false
    cmii-uav-cms: false
    cmii-uav-industrial-portfolio: false
    cmii-project-minio: false
    cmii-uav-material-warehouse: false
    cmii-uav-gateway: false
    cmii-open-gateway: false
    cmii-admin-gateway: false

apiGatewayApplication:
  enabled: true
  type: "api-gateway"
  # this ingress is for apis and gateways
  accessmode: public
  manifest:
    # all applications that need to expose an api/gateway to the public network
    # cmii-project-minio: false # deprecated
    cmii-uav-material-warehouse: true
    cmii-uav-gateway: false
    cmii-open-gateway: false
    cmii-admin-gateway: false
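With these defaults in place, the ingress layer can be exercised end to end. An install sketch (release name and working directory are assumptions; the diff itself does not include the install command):

# Run from the 2-helm-chart directory (the outside-deploy chart root):
helm upgrade --install xtgt . \
  --namespace xtgt --create-namespace \
  --set global.domain.DeployDomainName="lab.uavcmlc.com:31500" \
  --set global.ingress.tls_enabled=false
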
24	3-湘潭钢铁项目/2-helm-chart/charts/all-middleware/.helmignore	Normal file
@@ -0,0 +1,24 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
ci/*
31	3-湘潭钢铁项目/2-helm-chart/charts/all-middleware/Chart.yaml	Normal file
@@ -0,0 +1,31 @@
apiVersion: v2
name: all-middleware
description: including all middlewares for the uavcloud platform,
  such as mysql, redis, emqx, mongo, rabbitmq, nacos

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 2.2.2

keywords:
  - uavcloud
  - middleware
  - template
  - function
  - chinamobile
@@ -0,0 +1,26 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
Jenkinsfile
chart_template.yaml
emqx.conf
@@ -0,0 +1,24 @@
apiVersion: v2
name: emqx
description: emqx middleware; can be deployed in clusterMode or standaloneMode,
  depends on the PVCs in helm-emqxs

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 2.2.0
2288
3-湘潭钢铁项目/2-helm-chart/charts/all-middleware/charts/emqx/emqx.conf
Normal file
File diff suppressed because it is too large
@@ -0,0 +1,33 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Kubernetes standard labels
*/}}
{{- define "uavcloud-middleware.emqx.labels.standard" -}}
cmii.type: {{ .Values.global.application.type }}
{{- if .Values.enabled.clusterMode }}
cmii.app: {{ .Values.appName.clusterMode }}
cmii.emqx.architecture: cluster
{{- else }}
cmii.app: {{ .Values.appName.standaloneMode }}
cmii.emqx.architecture: standalone
{{- end }}
helm.sh/chart: {{ include "uavcloud-middleware.chart" . }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
{{- if .Values.global.image.tag }}
app.kubernetes.io/version: {{ .Values.global.image.tag | quote }}
{{- end }}
{{- end -}}

{{/*
Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
*/}}
{{- define "uavcloud-middleware.emqx.labels.matchLabels" -}}
cmii.type: {{ .Values.global.application.type }}
{{- if .Values.enabled.clusterMode }}
cmii.app: {{ .Values.appName.clusterMode }}
cmii.emqx.architecture: cluster
{{- else }}
cmii.app: {{ .Values.appName.standaloneMode }}
cmii.emqx.architecture: standalone
{{- end }}
{{- end -}}
@@ -0,0 +1,24 @@
{{- define "uavcloud-middleware.emqx.cluster.config.acl" -}}
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
{{- end -}}

{{- define "uavcloud-middleware.emqx.cluster.config.emqx_auth_username" -}}
auth.user.1.username = {{ .Values.auth.username }}
auth.user.1.password = {{ .Values.auth.password }}
auth.user.password_hash = sha256
{{- end -}}

{{- define "uavcloud-middleware.emqx.cluster.config.loaded_plugins" -}}
{emqx_auth_username,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
{{- end -}}
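These named templates are easiest to check by rendering them locally before a deploy. A minimal sketch, assuming the parent chart exposes the subchart values under the `emqx` key and that the ConfigMap template file is named `cluster-configmap.yaml` (both assumptions, not confirmed by this diff):

```bash
# Render only the EMQX cluster ConfigMap to inspect the generated acl.conf,
# auth file and plugin list; file name and value paths are assumptions.
helm template middleware ./charts/all-middleware \
  --show-only charts/emqx/templates/cluster-configmap.yaml \
  --set emqx.enabled.clusterMode=true
```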
@@ -0,0 +1,41 @@
{{- if .Values.enabled.clusterMode }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.clusterMode -}}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $applicationName }}-env
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
data:
  EMQX_CLUSTER__K8S__ADDRESS_TYPE: hostname
  EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc:443
  EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local
  EMQX_NAME: {{ $applicationName }}
  EMQX_CLUSTER__K8S__APP_NAME: {{ $applicationName }}
  EMQX_CLUSTER__DISCOVERY: k8s
  EMQX_CLUSTER__K8S__SERVICE_NAME: {{ $applicationName }}-headless
  EMQX_CLUSTER__K8S__NAMESPACE: {{ $namespace }}
  EMQX_ALLOW_ANONYMOUS: "false"
  EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $applicationName }}-cm
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
data:
  emqx_auth_username.conf: |-
    {{- include "uavcloud-middleware.emqx.cluster.config.emqx_auth_username" . | nindent 4 }}

  acl.conf: |-
    {{- include "uavcloud-middleware.emqx.cluster.config.acl" . | nindent 4 }}

  loaded_plugins: |-
    {{- include "uavcloud-middleware.emqx.cluster.config.loaded_plugins" . | nindent 4 }}
---
{{- end }}
@@ -0,0 +1,30 @@
{{- if and .Values.enabled.clusterMode .Values.ingress.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.clusterMode -}}
{{- $DeployDomainName := ternary (first (regexSplit ":" .Values.global.domain.DeployDomainName -1)) ( .Values.global.domain.DeployDomainName ) (contains ":" .Values.global.domain.DeployDomainName) -}}
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
  rules:
    - host: "emqx.{{ $DeployDomainName }}"
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: {{ $applicationName }}
              servicePort: {{ .Values.containerPort.dashboard }}
  {{- if .Values.global.ingress.tls_enabled }}
  tls:
    - hosts:
        - "emqx.{{ $DeployDomainName }}"
      secretName: "x.{{ $DeployDomainName }}-tls"
  {{- end }}
---
{{- end }}
@@ -0,0 +1,22 @@
{{- if and .Values.enabled.clusterMode .Values.enabled.standaloneMode }}
{{/* Do not use this; use the centrally generated PVC instead */}}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.clusterMode -}}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
  storageClassName: {{ .Values.global.storageClass.name }}
  accessModes:
    - {{ .Values.storageClass.accessMode }}
  volumeMode: {{ .Values.storageClass.volumeMode }}
  resources:
    requests:
      storage: {{ .Values.storageClass.resources.requests.storage }}
---
{{- end }}
@@ -0,0 +1,40 @@
{{- if .Values.enabled.clusterMode }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.clusterMode -}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
rules:
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get
      - watch
      - list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
subjects:
  - kind: ServiceAccount
    name: {{ $applicationName }}
    namespace: {{ $namespace }}
roleRef:
  kind: Role
  name: {{ $applicationName }}
  apiGroup: rbac.authorization.k8s.io
---
{{- end }}
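The Role above grants only read access to endpoints, which is exactly what EMQX's k8s discovery queries. A quick way to confirm the binding took effect, assuming the chart was installed into a `uavcloud` namespace (illustrative):

```bash
# Impersonate the EMQX service account and check the verb/resource pair
# granted by the Role; the namespace is an assumption.
kubectl -n uavcloud auth can-i list endpoints \
  --as=system:serviceaccount:uavcloud:helm-emqxs
```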
@@ -0,0 +1,94 @@
{{- if .Values.enabled.clusterMode }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.clusterMode -}}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicas.clusterMode }}
  serviceName: {{ $applicationName }}-headless
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      {{- include "uavcloud-middleware.emqx.labels.matchLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 8 }}
    spec:
      {{- if .Values.global.affinity }}
      affinity: {{- include "common.tplvalues.render" (dict "value" .Values.global.affinity "context" $) | nindent 8 }}
      {{- else }}
      affinity:
        {{- /* podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAffinityPreset "context" $) | nindent 10 -}}*/}}
        {{- /* podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAntiAffinityPreset "context" $) | nindent 10 }}*/}}
        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.global.nodeAffinityPreset.type "key" .Values.global.nodeAffinityPreset.key "values" .Values.global.nodeAffinityPreset.values) | nindent 10 -}}
      {{- end }}
      serviceAccountName: {{ $applicationName }}
      containers:
        - name: {{ $applicationName }}
          image: "{{ .Values.global.image.repository | default .Values.image.repository }}/emqx:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.global.image.pullPolicy }}
          ports:
            - name: mqtt
              containerPort: {{ .Values.containerPort.mqtt }}
            - name: mqttssl
              containerPort: {{ .Values.containerPort.mqttssl }}
            - name: mgmt
              containerPort: {{ .Values.containerPort.mgmt }}
            - name: ws
              containerPort: {{ .Values.containerPort.websocket }}
            - name: wss
              containerPort: {{ .Values.containerPort.wss }}
            - name: dashboard
              containerPort: {{ .Values.containerPort.dashboard }}
            - name: ekka
              containerPort: 4370
          envFrom:
            - configMapRef:
                name: {{ $applicationName }}-env
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          readinessProbe:
            httpGet:
              path: /status
              port: {{ .Values.containerPort.mgmt | default 8081 }}
            initialDelaySeconds: 5
            periodSeconds: 5
          volumeMounts:
            - name: emqx-data
              mountPath: "/opt/emqx/data/mnesia"
              readOnly: false
            - name: {{ $applicationName }}-cm
              mountPath: "/opt/emqx/etc/plugins/emqx_auth_username.conf"
              subPath: emqx_auth_username.conf
              readOnly: false
            - name: {{ $applicationName }}-cm
              mountPath: "/opt/emqx/etc/acl.conf"
              subPath: "acl.conf"
              readOnly: false
            - name: {{ $applicationName }}-cm
              mountPath: "/opt/emqx/data/loaded_plugins"
              subPath: loaded_plugins
              readOnly: false
      volumes:
        - name: emqx-data
          persistentVolumeClaim:
            claimName: helm-emqxs
        - name: {{ $applicationName }}-cm
          configMap:
            name: {{ $applicationName }}-cm
            items:
              - key: emqx_auth_username.conf
                path: emqx_auth_username.conf
              - key: acl.conf
                path: acl.conf
              - key: loaded_plugins
                path: loaded_plugins
{{- end }}
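Once the replicas are Ready, cluster formation can be verified from inside any pod. A sketch, assuming the default `appName.clusterMode` of `helm-emqxs` and an illustrative `uavcloud` namespace:

```bash
# emqx_ctl ships in the EMQX 4.x image; all three replicas should be listed
# as running nodes if k8s discovery worked.
kubectl -n uavcloud exec helm-emqxs-0 -- emqx_ctl cluster status
```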
@@ -0,0 +1,36 @@
{{- if .Values.enabled.clusterMode }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.clusterMode -}}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
  type: NodePort
  selector:
    {{- include "uavcloud-middleware.emqx.labels.matchLabels" . | nindent 4 }}
  ports:
    - port: {{ .Values.containerPort.mqtt }}
      name: mqtt
      targetPort: {{ .Values.containerPort.mqtt }}
      {{- if .Values.nodePort.enabled }}
      nodePort: {{ .Values.nodePort.mqtt }}
      {{- end }}
    - port: {{ .Values.containerPort.dashboard }}
      name: dashboard
      targetPort: {{ .Values.containerPort.dashboard }}
      {{- if .Values.nodePort.enabled }}
      nodePort: {{ .Values.nodePort.dashboard }}
      {{- end }}
    - port: {{ .Values.containerPort.websocket }}
      name: mqtt-websocket
      targetPort: {{ .Values.containerPort.websocket }}
      {{- if .Values.nodePort.enabled }}
      nodePort: {{ .Values.nodePort.mqttWebSocket }}
      {{- end }}
---
{{- end }}
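With `nodePort.enabled: true` the broker is reachable from outside the cluster. A smoke test with the mosquitto client, assuming the default NodePort 31883 and the credentials from values.yaml; `<node-ip>` stands in for any node address:

```bash
# Publish one message through the MQTT NodePort; requires mosquitto-clients.
mosquitto_pub -h <node-ip> -p 31883 -u cmii -P '<password-from-values>' \
  -t 'test/ping' -m 'hello'
```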
@@ -0,0 +1,47 @@
{{- if .Values.enabled.clusterMode }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.clusterMode -}}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ $applicationName }}-headless
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
  type: ClusterIP
  clusterIP: None
  selector:
    {{- include "uavcloud-middleware.emqx.labels.matchLabels" . | nindent 4 }}
  ports:
    - name: mqtt
      port: {{ .Values.containerPort.mqtt }}
      protocol: TCP
      targetPort: {{ .Values.containerPort.mqtt }}
    - name: mqttssl
      port: {{ .Values.containerPort.mqttssl }}
      protocol: TCP
      targetPort: {{ .Values.containerPort.mqttssl }}
    - name: mgmt
      port: {{ .Values.containerPort.mgmt }}
      protocol: TCP
      targetPort: {{ .Values.containerPort.mgmt }}
    - name: websocket
      port: {{ .Values.containerPort.websocket }}
      protocol: TCP
      targetPort: {{ .Values.containerPort.websocket }}
    - name: wss
      port: {{ .Values.containerPort.wss }}
      protocol: TCP
      targetPort: {{ .Values.containerPort.wss }}
    - name: dashboard
      port: {{ .Values.containerPort.dashboard }}
      protocol: TCP
      targetPort: {{ .Values.containerPort.dashboard }}
    - name: ekka
      port: 4370
      protocol: TCP
      targetPort: 4370
---
{{- end }}
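The headless service exists so that every pod gets a stable per-pod DNS record, which the `EMQX_CLUSTER__K8S__SUFFIX` and hostname-based discovery settings above rely on. A sketch of how to inspect those records, with an illustrative `uavcloud` namespace:

```bash
# A headless service resolves to one A record per ready pod.
kubectl -n uavcloud run dns-check --rm -it --image=busybox --restart=Never \
  -- nslookup helm-emqxs-headless.uavcloud.svc.cluster.local
```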
File diff suppressed because it is too large
@@ -0,0 +1,30 @@
{{- if and .Values.enabled.standaloneMode .Values.ingress.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.standaloneMode -}}
{{- $DeployDomainName := ternary (first (regexSplit ":" .Values.global.domain.DeployDomainName -1)) ( .Values.global.domain.DeployDomainName ) (contains ":" .Values.global.domain.DeployDomainName) -}}
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
  rules:
    - host: "emqx.{{ $DeployDomainName }}"
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: {{ $applicationName }}
              servicePort: {{ .Values.containerPort.dashboard }}
  {{- if .Values.global.ingress.tls_enabled }}
  tls:
    - hosts:
        - "emqx.{{ $DeployDomainName }}"
      secretName: "x.{{ $DeployDomainName }}-tls"
  {{- end }}
---
{{- end }}
@@ -0,0 +1,92 @@
{{- if and .Values.enabled.standaloneMode (not .Values.enabled.clusterMode) }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.standaloneMode -}}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
  serviceName: {{ $applicationName }}
  replicas: {{ .Values.replicas.standaloneMode }}
  selector:
    matchLabels:
      {{- include "uavcloud-middleware.emqx.labels.matchLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 8 }}
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      {{- if .Values.global.affinity }}
      affinity: {{- include "common.tplvalues.render" (dict "value" .Values.global.affinity "context" $) | nindent 8 }}
      {{- else }}
      affinity:
        {{- /* podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAffinityPreset "context" $) | nindent 10 -}}*/}}
        {{- /* podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAntiAffinityPreset "context" $) | nindent 10 }}*/}}
        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.global.nodeAffinityPreset.type "key" .Values.global.nodeAffinityPreset.key "values" .Values.global.nodeAffinityPreset.values) | nindent 10 -}}
      {{- end }}
      containers:
        - name: {{ $applicationName }}
          image: "{{ .Values.global.image.repository | default .Values.image.repository }}/emqx:{{ .Values.image.tag }}"
          securityContext:
            privileged: true
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          ports:
            - containerPort: {{ .Values.containerPort.mqtt }}
              name: mqtt
              protocol: TCP
            - containerPort: {{ .Values.containerPort.dashboard }}
              name: dashboard
              protocol: TCP
            - containerPort: {{ .Values.containerPort.websocket }}
              name: mqtt-websocket
              protocol: TCP
          volumeMounts:
            - name: {{ $applicationName }}-plugins
              mountPath: /opt/emqx/data/loaded_plugins
              subPath: loaded_plugins
            - name: {{ $applicationName }}-auth
              mountPath: /opt/emqx/etc/plugins/emqx_auth_username.conf
              subPath: emqx_auth_username.conf
            - name: {{ $applicationName }}-core
              mountPath: /opt/emqx/etc/emqx.conf
              subPath: emqx.conf
            - name: emqx-data
              mountPath: /opt/emqx/data/emqx_erl_pipes
              readOnly: false
              subPath: {{ $namespace }}/{{ $applicationName }}/data
            - name: emqx-data
              mountPath: /opt/emqx/log
              readOnly: false
              subPath: {{ $namespace }}/{{ $applicationName }}/log
      volumes:
        - name: emqx-data
          persistentVolumeClaim:
            claimName: helm-emqxs
        - name: {{ $applicationName }}-plugins
          configMap:
            name: {{ $applicationName }}-plugins
            items:
              - key: loaded_plugins
                path: loaded_plugins
        - name: {{ $applicationName }}-auth
          configMap:
            name: {{ $applicationName }}-auth
            items:
              - key: emqx_auth_username.conf
                path: emqx_auth_username.conf
        - name: {{ $applicationName }}-core
          configMap:
            name: {{ $applicationName }}-core
            defaultMode: 0664
            items:
              - key: emqx.conf
                path: emqx.conf
---
{{- end }}
@@ -0,0 +1,36 @@
{{- if and .Values.enabled.standaloneMode (not .Values.enabled.clusterMode) }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName.standaloneMode -}}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.emqx.labels.standard" . | nindent 4 }}
spec:
  type: {{ .Values.global.middlewareService.type }}
  selector:
    {{- include "uavcloud-middleware.emqx.labels.matchLabels" . | nindent 4 }}
  ports:
    - port: {{ .Values.containerPort.mqtt }}
      name: mqtt
      targetPort: {{ .Values.containerPort.mqtt }}
      {{- if eq .Values.global.middlewareService.type "NodePort" }}
      nodePort: {{ .Values.nodePort.mqtt }}
      {{- end }}
    - port: {{ .Values.containerPort.dashboard }}
      name: dashboard
      targetPort: {{ .Values.containerPort.dashboard }}
      {{- if eq .Values.global.middlewareService.type "NodePort" }}
      nodePort: {{ .Values.nodePort.dashboard }}
      {{- end }}
    - port: {{ .Values.containerPort.websocket }}
      name: mqtt-websocket
      targetPort: {{ .Values.containerPort.websocket }}
      {{- if eq .Values.global.middlewareService.type "NodePort" }}
      nodePort: {{ .Values.nodePort.mqttWebSocket }}
      {{- end }}
---
{{- end }}
@@ -0,0 +1,54 @@
enabled:
  clusterMode: true
  standaloneMode: false

auth:
  username: cmii
  password: odD8#Ve7.B

storageClass:
  accessMode: "ReadWriteOnce"
  volumeMode: Filesystem
  resources:
    requests:
      storage: 16Gi

nodePort:
  enabled: true
  mqtt: 31883
  dashboard: 48083
  mqttWebSocket: 38083

ingress:
  enabled: false

image:
  repository: docker.io/emqx # commonly not used
  tag: 4.2.12

replicas:
  clusterMode: 3
  standaloneMode: 1

appName:
  clusterMode: helm-emqxs
  standaloneMode: helm-emqx

# please don't modify the values below !!!
containerPort:
  mqtt: 1883
  mgmt: 8081
  websocket: 8083
  wss: 8084
  mqttssl: 8883
  dashboard: 18083

resources:
  limits:
    memory: 2Gi
    cpu: "1"
  requests:
    memory: 1Gi
    cpu: 300m

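These defaults can be overridden per environment rather than edited in place. A minimal sketch, assuming the parent chart nests these values under the `emqx` key and using an illustrative release name and namespace:

```bash
# Switch to standalone mode and replace the default password at install time.
# Value paths, release name and namespace are assumptions.
helm upgrade --install middleware ./charts/all-middleware \
  --namespace uavcloud --create-namespace \
  --set emqx.enabled.clusterMode=false \
  --set emqx.enabled.standaloneMode=true \
  --set emqx.auth.password='<strong-password>'
```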
@@ -0,0 +1,25 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
Jenkinsfile
chart_template.yaml
@@ -0,0 +1,24 @@
# Just template
apiVersion: v2
name: mongo
description: uavcloud middleware for mongo

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 2.2.2
@@ -0,0 +1,21 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Kubernetes standard labels
*/}}
{{- define "uavcloud-middleware.mongo.labels.standard" -}}
cmii.app: {{ .Values.appName }}
cmii.type: {{ .Values.global.application.type }}
helm.sh/chart: {{ include "uavcloud-middleware.chart" . }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
{{- if $.Values.global.image.tag }}
app.kubernetes.io/version: {{ $.Values.global.image.tag | quote }}
{{- end }}
{{- end -}}

{{/*
Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
*/}}
{{- define "uavcloud-middleware.mongo.labels.matchLabels" -}}
cmii.app: {{ .Values.appName }}
cmii.type: {{ .Values.global.application.type }}
{{- end -}}
@@ -0,0 +1,57 @@
{{- if .Values.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName -}}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.mongo.labels.standard" . | nindent 4 }}
spec:
  serviceName: {{ $applicationName }}
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "uavcloud-middleware.mongo.labels.matchLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "uavcloud-middleware.mongo.labels.standard" . | nindent 8 }}
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      {{- if .Values.global.affinity }}
      affinity: {{- include "common.tplvalues.render" (dict "value" .Values.global.affinity "context" $) | nindent 8 }}
      {{- else }}
      affinity:
        {{- /* podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAffinityPreset "context" $) | nindent 10 -}}*/}}
        {{- /* podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAntiAffinityPreset "context" $) | nindent 10 }}*/}}
        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.global.nodeAffinityPreset.type "key" .Values.global.nodeAffinityPreset.key "values" .Values.global.nodeAffinityPreset.values) | nindent 10 -}}
      {{- end }}
      containers:
        - name: {{ $applicationName }}
          image: "{{ .Values.global.image.repository | default .Values.image.repository }}/mongo:{{ .Values.image.tag }}"
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          ports:
            - containerPort: {{ .Values.containerPort.mongo }}
              name: mongo27017
              protocol: TCP
          env:
            - name: MONGO_INITDB_ROOT_USERNAME
              value: {{ .Values.auth.username }}
            - name: MONGO_INITDB_ROOT_PASSWORD
              value: {{ .Values.auth.password }}
          volumeMounts:
            - name: mongo-data
              mountPath: /data/db
              readOnly: false
              subPath: {{ $namespace }}/{{ $applicationName }}/data/db
      volumes:
        - name: mongo-data
          persistentVolumeClaim:
            claimName: helm-mongo
---
{{- end }}
@@ -0,0 +1,24 @@
{{- if .Values.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName -}}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.mongo.labels.standard" . | nindent 4 }}
spec:
  type: {{ .Values.global.middlewareService.type }}
  selector:
    {{- include "uavcloud-middleware.mongo.labels.matchLabels" . | nindent 4 }}
  ports:
    - port: 27017
      name: server-27017
      targetPort: 27017
      {{- if eq .Values.global.middlewareService.type "NodePort" }}
      nodePort: {{ .Values.nodePort.mongo }}
      {{- end }}
---
{{- end }}
@@ -0,0 +1,35 @@
image:
  repository: docker.io
  tag: "5.0"

replicaCount: 1

enabled: true

appName: helm-mongo

auth:
  username: cmlc
  password: REdPza8#oVlt

nodePort:
  mongo: 37017

containerPort:
  mongo: 27017

resources:
  limits:
    memory: 4Gi
    cpu: "2"
  requests:
    memory: 2Gi
    cpu: "1"

storageClass:
  accessMode: "ReadWriteMany"
  volumeMode: Filesystem
  resources:
    requests:
      storage: 16Gi

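With `middlewareService.type` set to NodePort, the root user defined above is reachable from outside the cluster. A sketch, assuming a mongosh client on the workstation and `<node-ip>` standing in for any node address:

```bash
# Connect through the NodePort (37017) as the root user created by
# MONGO_INITDB_ROOT_USERNAME/PASSWORD; substitute the real password.
mongosh "mongodb://cmlc:<password>@<node-ip>:37017/admin"
```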
@@ -0,0 +1,23 @@
apiVersion: v2
name: nacos
description: uavcloud middleware for nacos

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.1.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 2.2.2
@@ -0,0 +1,253 @@
/*
 * Copyright 1999-2018 Alibaba Group Holding Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/******************************************/
/*   Database  = cmii_nacos_config        */
/*   Table     = config_info              */
/******************************************/

# CREATE SCHEMA cmii_nacos_config;

CREATE TABLE `config_info`
(
    `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
    `data_id` varchar(255) NOT NULL COMMENT 'data_id',
    `group_id` varchar(255) DEFAULT NULL,
    `content` longtext NOT NULL COMMENT 'content',
    `md5` varchar(32) DEFAULT NULL COMMENT 'md5',
    `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
    `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'modify time',
    `src_user` text COMMENT 'source user',
    `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
    `app_name` varchar(128) DEFAULT NULL,
    `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant field',
    `c_desc` varchar(256) DEFAULT NULL,
    `c_use` varchar(64) DEFAULT NULL,
    `effect` varchar(64) DEFAULT NULL,
    `type` varchar(64) DEFAULT NULL,
    `c_schema` text,
    PRIMARY KEY (`id`),
    UNIQUE KEY `uk_configinfo_datagrouptenant` (`data_id`, `group_id`, `tenant_id`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8
  COLLATE = utf8_bin COMMENT ='config_info';

/******************************************/
/*   Database  = nacos_config             */
/*   Table     = config_info_aggr         */
/******************************************/
CREATE TABLE `config_info_aggr`
(
    `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
    `data_id` varchar(255) NOT NULL COMMENT 'data_id',
    `group_id` varchar(255) NOT NULL COMMENT 'group_id',
    `datum_id` varchar(255) NOT NULL COMMENT 'datum_id',
    `content` longtext NOT NULL COMMENT 'content',
    `gmt_modified` datetime NOT NULL COMMENT 'modify time',
    `app_name` varchar(128) DEFAULT NULL,
    `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant field',
    PRIMARY KEY (`id`),
    UNIQUE KEY `uk_configinfoaggr_datagrouptenantdatum` (`data_id`, `group_id`, `tenant_id`, `datum_id`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8
  COLLATE = utf8_bin COMMENT ='adds the tenant field';


/******************************************/
/*   Database  = nacos_config             */
/*   Table     = config_info_beta         */
/******************************************/
CREATE TABLE `config_info_beta`
(
    `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
    `data_id` varchar(255) NOT NULL COMMENT 'data_id',
    `group_id` varchar(128) NOT NULL COMMENT 'group_id',
    `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
    `content` longtext NOT NULL COMMENT 'content',
    `beta_ips` varchar(1024) DEFAULT NULL COMMENT 'betaIps',
    `md5` varchar(32) DEFAULT NULL COMMENT 'md5',
    `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
    `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'modify time',
    `src_user` text COMMENT 'source user',
    `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
    `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant field',
    PRIMARY KEY (`id`),
    UNIQUE KEY `uk_configinfobeta_datagrouptenant` (`data_id`, `group_id`, `tenant_id`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8
  COLLATE = utf8_bin COMMENT ='config_info_beta';

/******************************************/
/*   Database  = nacos_config             */
/*   Table     = config_info_tag          */
/******************************************/
CREATE TABLE `config_info_tag`
(
    `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
    `data_id` varchar(255) NOT NULL COMMENT 'data_id',
    `group_id` varchar(128) NOT NULL COMMENT 'group_id',
    `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
    `tag_id` varchar(128) NOT NULL COMMENT 'tag_id',
    `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
    `content` longtext NOT NULL COMMENT 'content',
    `md5` varchar(32) DEFAULT NULL COMMENT 'md5',
    `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
    `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'modify time',
    `src_user` text COMMENT 'source user',
    `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
    PRIMARY KEY (`id`),
    UNIQUE KEY `uk_configinfotag_datagrouptenanttag` (`data_id`, `group_id`, `tenant_id`, `tag_id`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8
  COLLATE = utf8_bin COMMENT ='config_info_tag';

/******************************************/
/*   Database  = nacos_config             */
/*   Table     = config_tags_relation     */
/******************************************/
CREATE TABLE `config_tags_relation`
(
    `id` bigint(20) NOT NULL COMMENT 'id',
    `tag_name` varchar(128) NOT NULL COMMENT 'tag_name',
    `tag_type` varchar(64) DEFAULT NULL COMMENT 'tag_type',
    `data_id` varchar(255) NOT NULL COMMENT 'data_id',
    `group_id` varchar(128) NOT NULL COMMENT 'group_id',
    `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
    `nid` bigint(20) NOT NULL AUTO_INCREMENT,
    PRIMARY KEY (`nid`),
    UNIQUE KEY `uk_configtagrelation_configidtag` (`id`, `tag_name`, `tag_type`),
    KEY `idx_tenant_id` (`tenant_id`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8
  COLLATE = utf8_bin COMMENT ='config_tag_relation';

/******************************************/
/*   Database  = nacos_config             */
/*   Table     = group_capacity           */
/******************************************/
CREATE TABLE `group_capacity`
(
    `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'primary key ID',
    `group_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Group ID; an empty string means the whole cluster',
    `quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'quota; 0 means use the default',
    `usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'usage',
    `max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max size of a single config, in bytes; 0 means use the default',
    `max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max number of aggregated sub-configs; 0 means use the default',
    `max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max size of a single aggregated sub-config, in bytes; 0 means use the default',
    `max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max number of change-history entries',
    `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
    `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'modify time',
    PRIMARY KEY (`id`),
    UNIQUE KEY `uk_group_id` (`group_id`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8
  COLLATE = utf8_bin COMMENT ='capacity info for the cluster and each Group';

/******************************************/
/*   Database  = nacos_config             */
/*   Table     = his_config_info          */
/******************************************/
CREATE TABLE `his_config_info`
(
    `id` bigint(64) unsigned NOT NULL,
    `nid` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
    `data_id` varchar(255) NOT NULL,
    `group_id` varchar(128) NOT NULL,
    `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
    `content` longtext NOT NULL,
    `md5` varchar(32) DEFAULT NULL,
    `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
    `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
    `src_user` text,
    `src_ip` varchar(50) DEFAULT NULL,
    `op_type` char(10) DEFAULT NULL,
    `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant field',
    PRIMARY KEY (`nid`),
    KEY `idx_gmt_create` (`gmt_create`),
    KEY `idx_gmt_modified` (`gmt_modified`),
    KEY `idx_did` (`data_id`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8
  COLLATE = utf8_bin COMMENT ='multi-tenant retrofit';


/******************************************/
/*   Database  = nacos_config             */
/*   Table     = tenant_capacity          */
/******************************************/
CREATE TABLE `tenant_capacity`
(
    `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'primary key ID',
    `tenant_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Tenant ID',
    `quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'quota; 0 means use the default',
    `usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'usage',
    `max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max size of a single config, in bytes; 0 means use the default',
    `max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max number of aggregated sub-configs',
    `max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max size of a single aggregated sub-config, in bytes; 0 means use the default',
    `max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max number of change-history entries',
    `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
    `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'modify time',
    PRIMARY KEY (`id`),
    UNIQUE KEY `uk_tenant_id` (`tenant_id`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8
  COLLATE = utf8_bin COMMENT ='tenant capacity info';


CREATE TABLE `tenant_info`
(
    `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
    `kp` varchar(128) NOT NULL COMMENT 'kp',
    `tenant_id` varchar(128) default '' COMMENT 'tenant_id',
    `tenant_name` varchar(128) default '' COMMENT 'tenant_name',
    `tenant_desc` varchar(256) DEFAULT NULL COMMENT 'tenant_desc',
    `create_source` varchar(32) DEFAULT NULL COMMENT 'create_source',
    `gmt_create` bigint(20) NOT NULL COMMENT 'create time',
    `gmt_modified` bigint(20) NOT NULL COMMENT 'modify time',
    PRIMARY KEY (`id`),
    UNIQUE KEY `uk_tenant_info_kptenantid` (`kp`, `tenant_id`),
    KEY `idx_tenant_id` (`tenant_id`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8
  COLLATE = utf8_bin COMMENT ='tenant_info';

CREATE TABLE `users`
(
    `username` varchar(50) NOT NULL PRIMARY KEY,
    `password` varchar(500) NOT NULL,
    `enabled` boolean NOT NULL
);

CREATE TABLE `roles`
(
    `username` varchar(50) NOT NULL,
    `role` varchar(50) NOT NULL,
    UNIQUE INDEX `idx_user_role` (`username` ASC, `role` ASC) USING BTREE
);

CREATE TABLE `permissions`
(
    `role` varchar(50) NOT NULL,
    `resource` varchar(255) NOT NULL,
    `action` varchar(8) NOT NULL,
    UNIQUE INDEX `uk_role_permission` (`role`, `resource`, `action`) USING BTREE
);

INSERT INTO users (username, password, enabled)
VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE);

INSERT INTO roles (username, role)
VALUES ('nacos', 'ROLE_ADMIN');
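This schema has to be loaded into the database referenced by the nacos values (host `helm-mysql`, port 6033, db `nacos_config`) before the server starts. A sketch, assuming the file is saved as `nacos-mysql.sql` (an illustrative name) and a mysql client can reach the service:

```bash
# Create the database first if the commented CREATE SCHEMA was never run,
# then load the tables; the file name is an assumption.
mysql -h helm-mysql -P 6033 -u k8s_admin -p \
  -e 'CREATE DATABASE IF NOT EXISTS nacos_config'
mysql -h helm-mysql -P 6033 -u k8s_admin -p nacos_config < nacos-mysql.sql
```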
@@ -0,0 +1,21 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Kubernetes standard labels
*/}}
{{- define "uavcloud-middleware.nacos.labels.standard" -}}
cmii.app: {{ .Values.appName }}
cmii.type: {{ .Values.global.application.type }}
helm.sh/chart: {{ include "uavcloud-middleware.chart" . }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
{{- if $.Values.global.image.tag }}
app.kubernetes.io/version: {{ $.Values.global.image.tag | quote }}
{{- end }}
{{- end -}}

{{/*
Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
*/}}
{{- define "uavcloud-middleware.nacos.labels.matchLabels" -}}
cmii.app: {{ .Values.appName }}
cmii.type: {{ .Values.global.application.type }}
{{- end -}}
@@ -0,0 +1,19 @@
{{- if .Values.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName -}}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $applicationName }}-cm
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.nacos.labels.standard" . | nindent 4 }}
data:
  mysql.db.name: "{{ .Values.database.db_name }}"
  mysql.db.host: "{{ .Values.database.host }}"
  mysql.port: "{{ .Values.database.port }}"
  mysql.user: "{{ .Values.database.username }}"
  mysql.password: "{{ .Values.database.password }}"
---
{{- end }}
@@ -0,0 +1,30 @@
{{- if and .Values.enabled .Values.ingress.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName -}}
{{- $DeployDomainName := ternary (first (regexSplit ":" .Values.global.domain.DeployDomainName -1)) ( .Values.global.domain.DeployDomainName ) (contains ":" .Values.global.domain.DeployDomainName) -}}
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.nacos.labels.standard" . | nindent 4 }}
spec:
  rules:
    - host: "nacos.{{ $DeployDomainName }}"
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: {{ $applicationName }}
              servicePort: {{ .Values.containerPort.dashboard }}
  {{- if .Values.global.ingress.tls_enabled }}
  tls:
    - hosts:
        - "nacos.{{ $DeployDomainName }}"
      secretName: "x.{{ $DeployDomainName }}-tls"
  {{- end }}
---
{{- end }}
@@ -0,0 +1,82 @@
{{- if .Values.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName -}}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.nacos.labels.standard" . | nindent 4 }}
spec:
  serviceName: {{ $applicationName }}
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "uavcloud-middleware.nacos.labels.matchLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "uavcloud-middleware.nacos.labels.standard" . | nindent 8 }}
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      {{- if .Values.global.affinity }}
      affinity: {{- include "common.tplvalues.render" (dict "value" .Values.global.affinity "context" $) | nindent 8 }}
      {{- else }}
      affinity:
        {{- /* podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAffinityPreset "context" $) | nindent 10 -}}*/}}
        {{- /* podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAntiAffinityPreset "context" $) | nindent 10 }}*/}}
        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.global.nodeAffinityPreset.type "key" .Values.global.nodeAffinityPreset.key "values" .Values.global.nodeAffinityPreset.values) | nindent 10 -}}
      {{- end }}
      containers:
        - name: nacos-server
          image: "{{ .Values.global.image.repository | default .Values.image.repository }}/nacos-server:{{ .Values.image.tag }}"
          ports:
            - containerPort: {{ .Values.containerPort.dashboard }}
              name: dashboard
          env:
            - name: NACOS_AUTH_ENABLE
              value: "true"
            - name: NACOS_REPLICAS
              value: "1"
            - name: MYSQL_SERVICE_DB_NAME
              valueFrom:
                configMapKeyRef:
                  name: {{ $applicationName }}-cm
                  key: mysql.db.name
            - name: MYSQL_SERVICE_PORT
              valueFrom:
                configMapKeyRef:
                  name: {{ $applicationName }}-cm
                  key: mysql.port
            - name: MYSQL_SERVICE_USER
              valueFrom:
                configMapKeyRef:
                  name: {{ $applicationName }}-cm
                  key: mysql.user
            - name: MYSQL_SERVICE_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: {{ $applicationName }}-cm
                  key: mysql.password
            - name: MYSQL_SERVICE_HOST
              valueFrom:
                configMapKeyRef:
                  name: {{ $applicationName }}-cm
                  key: mysql.db.host
            - name: NACOS_SERVER_PORT
              value: "{{ .Values.containerPort.dashboard }}"
            - name: NACOS_APPLICATION_PORT
              value: "{{ .Values.containerPort.dashboard }}"
            - name: PREFER_HOST_MODE
              value: "hostname"
            - name: MODE
              value: standalone
            - name: SPRING_DATASOURCE_PLATFORM
              value: mysql
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
---
{{- end }}
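Because `NACOS_AUTH_ENABLE` is `"true"`, API calls need a token first. The seeded `users` row from the SQL above corresponds to the default nacos/nacos console login; a sketch against the v1 auth endpoint, with an illustrative host:

```bash
# Obtain an access token, then pass it as accessToken=... on later API calls.
curl -X POST 'http://nacos.<your-domain>/nacos/v1/auth/login' \
  -d 'username=nacos&password=nacos'
```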
@@ -0,0 +1,24 @@
{{- if .Values.enabled }}
{{- $namespace := .Release.Namespace -}}
{{- $applicationName := .Values.appName -}}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ $applicationName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uavcloud-middleware.nacos.labels.standard" . | nindent 4 }}
spec:
  type: {{ .Values.global.middlewareService.type }}
  selector:
    {{- include "uavcloud-middleware.nacos.labels.matchLabels" . | nindent 4 }}
  ports:
    - port: {{ .Values.containerPort.dashboard }}
      name: server
      targetPort: {{ .Values.containerPort.dashboard }}
      {{- if eq .Values.global.middlewareService.type "NodePort" }}
      nodePort: {{ .Values.nodePort.dashboard }}
      {{- end }}
---
{{- end }}
@@ -0,0 +1,34 @@
image:
  repository: docker.io/nacos
  tag: 2.0.1

replicaCount: 1

ingress:
  enabled: false

enabled: true

appName: helm-nacos

nodePort:
  dashboard: 38848

database:
  host: helm-mysql
  port: 6033
  username: k8s_admin
  password: EWde2cKP9w.G
  db_name: nacos_config

containerPort:
  dashboard: 8848

resources:
  limits:
    memory: 2Gi
    cpu: "1"
  requests:
    memory: 1Gi
    cpu: 500m
@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
@@ -0,0 +1,6 @@
dependencies:
- name: common
  repository: https://charts.bitnami.com/bitnami
  version: 1.10.3
digest: sha256:710e8247ae70ea63a2fb2fde4320511ff28c7b5c7a738861427f104a7718bdf4
generated: "2021-12-02T17:29:16.053850737Z"
@@ -0,0 +1,26 @@
annotations:
  category: Infrastructure
apiVersion: v2
appVersion: 3.9.12
dependencies:
- name: common
  repository: https://charts.bitnami.com/bitnami
  tags:
  - bitnami-common
  version: 1.x.x
description: Open source message broker software that implements the Advanced Message
  Queuing Protocol (AMQP)
home: https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq
icon: https://bitnami.com/assets/stacks/rabbitmq/img/rabbitmq-stack-220x234.png
keywords:
- rabbitmq
- message queue
- AMQP
maintainers:
- email: containers@bitnami.com
  name: Bitnami
name: rabbitmq
sources:
- https://github.com/bitnami/bitnami-docker-rabbitmq
- https://www.rabbitmq.com
version: 8.26.1
@@ -0,0 +1,592 @@
|
||||
# RabbitMQ
|
||||
|
||||
[RabbitMQ](https://www.rabbitmq.com/) is an open source multi-protocol message broker.
|
||||
|
||||
## TL;DR
|
||||
|
||||
```bash
|
||||
$ helm repo add bitnami https://charts.bitnami.com/bitnami
|
||||
$ helm install my-release bitnami/rabbitmq
|
||||
```
|
||||
|
||||
## Introduction
|
||||
|
||||
This chart bootstraps a [RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
|
||||
|
||||
Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/).
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Kubernetes 1.12+
|
||||
- Helm 3.1.0
|
||||
- PV provisioner support in the underlying infrastructure
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
```bash
|
||||
$ helm install my-release bitnami/rabbitmq
|
||||
```
|
||||
|
||||
The command deploys RabbitMQ on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
|
||||
|
||||
> **Tip**: List all releases using `helm list`
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the `my-release` deployment:
|
||||
|
||||
```bash
|
||||
$ helm delete my-release
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Parameters
|
||||
|
||||
### Global parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| ------------------------- | ----------------------------------------------- | ----- |
|
||||
| `global.imageRegistry` | Global Docker image registry | `""` |
|
||||
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
|
||||
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
|
||||
|
||||
|
||||
### RabbitMQ Image parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| ------------------- | -------------------------------------------------------------- | --------------------- |
|
||||
| `image.registry` | RabbitMQ image registry | `docker.io` |
|
||||
| `image.repository` | RabbitMQ image repository | `bitnami/rabbitmq` |
|
||||
| `image.tag` | RabbitMQ image tag (immutable tags are recommended) | `3.9.12-debian-10-r0` |
|
||||
| `image.pullPolicy` | RabbitMQ image pull policy | `IfNotPresent` |
|
||||
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
|
||||
| `image.debug` | Set to true if you would like to see extra information on logs | `false` |
|
||||
|
||||
|
||||
### Common parameters

| Name | Description | Value |
| --- | --- | --- |
| `nameOverride` | String to partially override rabbitmq.fullname template (will maintain the release name) | `""` |
| `fullnameOverride` | String to fully override rabbitmq.fullname template | `""` |
| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` |
| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` |
| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` |
| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` |
| `hostAliases` | Deployment pod host aliases | `[]` |
| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
| `auth.username` | RabbitMQ application username | `user` |
| `auth.password` | RabbitMQ application password | `""` |
| `auth.existingPasswordSecret` | Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) | `""` |
| `auth.erlangCookie` | Erlang cookie to determine whether different nodes are allowed to communicate with each other | `""` |
| `auth.existingErlangSecret` | Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) | `""` |
| `auth.tls.enabled` | Enable TLS support on RabbitMQ | `false` |
| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` |
| `auth.tls.failIfNoPeerCert` | When set to true, TLS connection will be rejected if client fails to provide a certificate | `true` |
| `auth.tls.sslOptionsVerify` | Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? | `verify_peer` |
| `auth.tls.caCertificate` | Certificate Authority (CA) bundle content | `""` |
| `auth.tls.serverCertificate` | Server certificate content | `""` |
| `auth.tls.serverKey` | Server private key content | `""` |
| `auth.tls.existingSecret` | Existing secret with certificate content to RabbitMQ credentials | `""` |
| `auth.tls.existingSecretFullChain` | Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. | `false` |
| `logs` | Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable | `-` |
| `ulimitNofiles` | RabbitMQ Max File Descriptors | `65536` |
| `maxAvailableSchedulers` | RabbitMQ maximum available scheduler threads | `""` |
| `onlineSchedulers` | RabbitMQ online scheduler threads | `""` |
| `memoryHighWatermark.enabled` | Enable configuring Memory high watermark on RabbitMQ | `false` |
| `memoryHighWatermark.type` | Memory high watermark type. Either `absolute` or `relative` | `relative` |
| `memoryHighWatermark.value` | Memory high watermark value | `0.4` |
| `plugins` | List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) | `rabbitmq_management rabbitmq_peer_discovery_k8s` |
| `communityPlugins` | List of Community plugins (URLs) to be downloaded during container initialization | `""` |
| `extraPlugins` | Extra plugins to enable (single string containing a space-separated list) | `rabbitmq_auth_backend_ldap` |
| `clustering.enabled` | Enable RabbitMQ clustering | `true` |
| `clustering.addressType` | Switch clustering mode. Either `ip` or `hostname` | `hostname` |
| `clustering.rebalance` | Rebalance master for queues in cluster when new replica is created | `false` |
| `clustering.forceBoot` | Force boot of an unexpectedly shut down cluster (in an unexpected order). | `false` |
| `clustering.partitionHandling` | Switch Partition Handling Strategy. Either `autoheal` or `pause-minority` or `pause-if-all-down` or `ignore` | `autoheal` |
| `loadDefinition.enabled` | Enable loading a RabbitMQ definitions file to configure RabbitMQ | `false` |
| `loadDefinition.existingSecret` | Existing secret with the load definitions file | `""` |
| `command` | Override default container command (useful when using custom images) | `[]` |
| `args` | Override default container args (useful when using custom images) | `[]` |
| `terminationGracePeriodSeconds` | Default duration in seconds k8s waits for container to exit before sending kill signal. | `120` |
| `extraEnvVars` | Extra environment variables to add to RabbitMQ pods | `[]` |
| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` |
| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables (in case of sensitive data) | `""` |
| `extraContainerPorts` | Extra ports to be included in container spec, primarily informational | `[]` |
| `configuration` | RabbitMQ Configuration file content: required cluster configuration | `""` |
| `extraConfiguration` | Configuration file content: extra configuration to be appended to RabbitMQ configuration | `""` |
| `advancedConfiguration` | Configuration file content: advanced configuration | `""` |
| `ldap.enabled` | Enable LDAP support | `false` |
| `ldap.servers` | List of LDAP servers hostnames | `[]` |
| `ldap.port` | LDAP servers port | `389` |
| `ldap.user_dn_pattern` | Pattern used to translate the provided username into a value to be used for the LDAP bind | `cn=${username},dc=example,dc=org` |
| `ldap.tls.enabled` | If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter | `false` |
| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts | `[]` |
| `extraVolumes` | Optionally specify extra list of additional volumes. | `[]` |
| `extraSecrets` | Optionally specify extra secrets to be created by the chart. | `{}` |
| `extraSecretsPrependReleaseName` | Set this flag to true if extraSecrets should be created with <release-name> prepended. | `false` |
### Statefulset parameters

| Name | Description | Value |
| --- | --- | --- |
| `replicaCount` | Number of RabbitMQ replicas to deploy | `1` |
| `schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` |
| `podManagementPolicy` | Pod management policy | `OrderedReady` |
| `podLabels` | RabbitMQ Pod labels. Evaluated as a template | `{}` |
| `podAnnotations` | RabbitMQ Pod annotations. Evaluated as a template | `{}` |
| `updateStrategyType` | Update strategy type for RabbitMQ statefulset | `RollingUpdate` |
| `statefulsetLabels` | RabbitMQ statefulset labels. Evaluated as a template | `{}` |
| `priorityClassName` | Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand | `""` |
| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` |
| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
| `affinity` | Affinity for pod assignment. Evaluated as a template | `{}` |
| `nodeSelector` | Node labels for pod assignment. Evaluated as a template | `{}` |
| `tolerations` | Tolerations for pod assignment. Evaluated as a template | `[]` |
| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `podSecurityContext.enabled` | Enable RabbitMQ pods' Security Context | `true` |
| `podSecurityContext.fsGroup` | Group ID for the filesystem used by the containers | `1001` |
| `podSecurityContext.runAsUser` | User ID for the service user running the pod | `1001` |
| `containerSecurityContext` | RabbitMQ containers' Security Context | `{}` |
| `resources.limits` | The resources limits for RabbitMQ containers | `{}` |
| `resources.requests` | The requested resources for RabbitMQ containers | `{}` |
| `livenessProbe.enabled` | Enable livenessProbe | `true` |
| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` |
| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` |
| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `20` |
| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `readinessProbe.enabled` | Enable readinessProbe | `true` |
| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` |
| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `30` |
| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `20` |
| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` |
| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `customLivenessProbe` | Override default liveness probe | `{}` |
| `customReadinessProbe` | Override default readiness probe | `{}` |
| `customStartupProbe` | Define a custom startup probe | `{}` |
| `initContainers` | Add init containers to the RabbitMQ pod | `[]` |
| `sidecars` | Add sidecar containers to the RabbitMQ pod | `[]` |
| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` |
| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` |
| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` |
### RBAC parameters

| Name | Description | Value |
| --- | --- | --- |
| `serviceAccount.create` | Enable creation of ServiceAccount for RabbitMQ pods | `true` |
| `serviceAccount.name` | Name of the created serviceAccount | `""` |
| `serviceAccount.automountServiceAccountToken` | Auto-mount the service account token in the pod | `true` |
| `rbac.create` | Whether RBAC rules should be created | `true` |
### Persistence parameters

| Name | Description | Value |
| --- | --- | --- |
| `persistence.enabled` | Enable RabbitMQ data persistence using PVC | `true` |
| `persistence.storageClass` | PVC Storage Class for RabbitMQ data volume | `""` |
| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` |
| `persistence.accessMode` | PVC Access Mode for RabbitMQ data volume | `ReadWriteOnce` |
| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `""` |
| `persistence.size` | PVC Storage Request for RabbitMQ data volume | `8Gi` |
| `persistence.volumes` | Additional volumes without creating PVC | `[]` |
| `persistence.annotations` | Persistence annotations. Evaluated as a template | `{}` |
### Exposure parameters

| Name | Description | Value |
| --- | --- | --- |
| `service.type` | Kubernetes Service type | `ClusterIP` |
| `service.portEnabled` | Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. | `true` |
| `service.port` | Amqp port | `5672` |
| `service.portName` | Amqp service port name | `amqp` |
| `service.tlsPort` | Amqp TLS port | `5671` |
| `service.tlsPortName` | Amqp TLS service port name | `amqp-ssl` |
| `service.nodePort` | Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` | `""` |
| `service.tlsNodePort` | Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` | `""` |
| `service.distPort` | Erlang distribution server port | `25672` |
| `service.distPortName` | Erlang distribution service port name | `dist` |
| `service.distNodePort` | Node port override for `dist` port, if serviceType is `NodePort` | `""` |
| `service.managerPortEnabled` | RabbitMQ Manager port | `true` |
| `service.managerPort` | RabbitMQ Manager port | `15672` |
| `service.managerPortName` | RabbitMQ Manager service port name | `http-stats` |
| `service.managerNodePort` | Node port override for `http-stats` port, if serviceType `NodePort` | `""` |
| `service.metricsPort` | RabbitMQ Prometheus metrics port | `9419` |
| `service.metricsPortName` | RabbitMQ Prometheus metrics service port name | `metrics` |
| `service.metricsNodePort` | Node port override for `metrics` port, if serviceType is `NodePort` | `""` |
| `service.epmdNodePort` | Node port override for `epmd` port, if serviceType is `NodePort` | `""` |
| `service.epmdPortName` | EPMD Discovery service port name | `epmd` |
| `service.extraPorts` | Extra ports to expose in the service | `[]` |
| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is `LoadBalancer` | `[]` |
| `service.externalIPs` | Set the ExternalIPs | `[]` |
| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
| `service.loadBalancerIP` | Set the LoadBalancerIP | `""` |
| `service.labels` | Service labels. Evaluated as a template | `{}` |
| `service.annotations` | Service annotations. Evaluated as a template | `{}` |
| `service.annotationsHeadless` | Headless Service annotations. Evaluated as a template | `{}` |
| `ingress.enabled` | Enable ingress resource for Management console | `false` |
| `ingress.path` | Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` |
| `ingress.pathType` | Ingress path type | `ImplementationSpecific` |
| `ingress.hostname` | Default host for the ingress resource | `rabbitmq.local` |
| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` |
| `ingress.selfSigned` | Set this to true in order to create a TLS secret for this ingress record | `false` |
| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` |
| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` |
| `ingress.secrets` | Custom TLS certificates as secrets | `[]` |
| `ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` |
| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` |
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
| `networkPolicy.additionalRules` | Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. | `[]` |
### Metrics Parameters

| Name | Description | Value |
| --- | --- | --- |
| `metrics.enabled` | Enable exposing RabbitMQ metrics to be gathered by Prometheus | `false` |
| `metrics.plugins` | Plugins to enable Prometheus metrics in RabbitMQ | `rabbitmq_prometheus` |
| `metrics.podAnnotations` | Annotations for enabling prometheus to access the metrics endpoint | `{}` |
| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` |
| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` |
| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` |
| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` |
| `metrics.serviceMonitor.relabellings` | MetricsRelabelConfigs to apply to samples before ingestion. DEPRECATED: Will be removed in next major. | `[]` |
| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` |
| `metrics.serviceMonitor.metricRelabelings` | MetricsRelabelConfigs to apply to samples before ingestion. | `[]` |
| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` |
| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are required by the installed Prometheus Operator | `{}` |
| `metrics.serviceMonitor.targetLabels` | Used to keep given service's labels in target | `{}` |
| `metrics.serviceMonitor.podTargetLabels` | Used to keep given pod's labels in target | `{}` |
| `metrics.serviceMonitor.path` | Define the path used by ServiceMonitor to scrape metrics | `""` |
| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` |
| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` |
| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `""` |
| `metrics.prometheusRule.rules` | List of rules, used as template by Helm. | `[]` |
### Init Container Parameters

| Name | Description | Value |
| --- | --- | --- |
| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r301` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` |
| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` |
The above parameters map to the env variables defined in [bitnami/rabbitmq](https://github.com/bitnami/bitnami-docker-rabbitmq). For more information please refer to the [bitnami/rabbitmq](https://github.com/bitnami/bitnami-docker-rabbitmq) image documentation.

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example:

```bash
$ helm install my-release \
  --set auth.username=admin,auth.password=secretpassword,auth.erlangCookie=secretcookie \
    bitnami/rabbitmq
```

The above command sets the RabbitMQ admin username and password to `admin` and `secretpassword` respectively. Additionally, the secure Erlang cookie is set to `secretcookie`.

> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.

Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example:

```bash
$ helm install my-release -f values.yaml bitnami/rabbitmq
```

> **Tip**: You can use the default [values.yaml](values.yaml)
## Configuration and installation details

### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)

It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.

Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
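
As a minimal sketch, pinning the deployment to a fixed, immutable tag in your values file could look like this (the tag shown is only illustrative):

```yaml
image:
  registry: docker.io
  repository: bitnami/rabbitmq
  tag: 3.9.12-debian-10-r0  # pin an immutable tag; avoid rolling tags such as "3.9"
```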
### Set pod affinity

This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).

As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart, as in the sketch below. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
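
A minimal values sketch forcing replicas onto different nodes with the presets; the node label used here is an assumption for illustration:

```yaml
podAntiAffinityPreset: hard  # never co-schedule two RabbitMQ replicas on the same node
nodeAffinityPreset:
  type: soft                 # prefer, but do not require, matching nodes
  key: kubernetes.io/os      # illustrative node label
  values:
    - linux
```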
### Scale horizontally

To horizontally scale this chart once it has been deployed, two options are available (see the values sketch after this list):

- Use the `kubectl scale` command.
- Upgrade the chart modifying the `replicaCount` parameter.

> NOTE: It is mandatory to specify the password and Erlang cookie that was set the first time the chart was installed when upgrading the chart.

When scaling down the solution, unnecessary RabbitMQ nodes are automatically stopped, but they are not removed from the cluster. You need to manually remove them by running the `rabbitmqctl forget_cluster_node` command.
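
A values sketch for the second option; the credential values are placeholders that must match what was set at first install:

```yaml
replicaCount: 5                        # new number of RabbitMQ replicas
auth:
  password: "first-install-password"   # placeholder: reuse the original password
  erlangCookie: "first-install-cookie" # placeholder: reuse the original Erlang cookie
```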
Refer to the chart documentation for [more information on scaling the RabbitMQ cluster horizontally](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/scale-deployment/).
### Enable TLS support

To enable TLS support, first generate the certificates as described in the [RabbitMQ documentation for SSL certificate generation](https://www.rabbitmq.com/ssl.html#automated-certificate-generation).

Once the certificates are generated, you have two alternatives:

* Create a secret with the certificates and associate the secret when deploying the chart
* Include the certificates in the *values.yaml* file when deploying the chart

Set the *auth.tls.failIfNoPeerCert* parameter to *false* to allow a TLS connection if the client fails to provide a certificate.

Set the *auth.tls.sslOptionsVerify* to *verify_peer* to force a node to perform peer verification. When set to *verify_none*, peer verification will be disabled and certificate exchange won't be performed.
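
A minimal values sketch for the second alternative, embedding the PEM content directly (certificate bodies elided):

```yaml
auth:
  tls:
    enabled: true
    failIfNoPeerCert: true
    sslOptionsVerify: verify_peer
    caCertificate: |-
      -----BEGIN CERTIFICATE-----
      ...
      -----END CERTIFICATE-----
    serverCertificate: |-
      -----BEGIN CERTIFICATE-----
      ...
      -----END CERTIFICATE-----
    serverKey: |-
      -----BEGIN RSA PRIVATE KEY-----
      ...
      -----END RSA PRIVATE KEY-----
```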
Refer to the chart documentation for [more information and examples of enabling TLS and using Let's Encrypt certificates](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/enable-tls-ingress/).
### Load custom definitions

It is possible to [load a RabbitMQ definitions file to configure RabbitMQ](https://www.rabbitmq.com/management.html#load-definitions).

Because definitions may contain RabbitMQ credentials, [store the JSON as a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). Within the secret's data, choose a key name that corresponds with the desired load definitions filename (i.e. `load_definition.json`) and use the JSON object as the value.

Next, specify the `load_definitions` property as an `extraConfiguration` pointing to the load definition file path within the container (i.e. `/app/load_definition.json`) and set `loadDefinition.enabled` to `true`. Any load definitions specified will be available within the container at `/app`.

> NOTE: Loading a definition will take precedence over any configuration done through [Helm values](#parameters).

If needed, you can use `extraSecrets` to let the chart create the secret for you. This way, you don't need to manually create it before deploying a release. These secrets can also be templated to use supplied chart values, as in the sketch below.
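
A sketch of the full wiring, assuming the secret key is named `load_definition.json` as described above; the definitions content itself is a minimal illustration:

```yaml
extraSecrets:
  load-definition:
    load_definition.json: |
      {
        "vhosts": [
          {
            "name": "/"
          }
        ]
      }
loadDefinition:
  enabled: true
  existingSecret: load-definition
extraConfiguration: |
  load_definitions = /app/load_definition.json
```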
Refer to the chart documentation for [more information and configuration examples of loading custom definitions](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/load-files/).
### Configure LDAP support

LDAP support can be enabled in the chart by specifying the `ldap.*` parameters while creating a release, as in the sketch below. Refer to the chart documentation for [more information and a configuration example](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/configure-ldap/).
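
A minimal values sketch (the server hostname is illustrative):

```yaml
ldap:
  enabled: true
  servers:
    - my-ldap-server          # illustrative LDAP server hostname
  port: "389"
  user_dn_pattern: cn=${username},dc=example,dc=org
```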
### Configure memory high watermark

It is possible to configure a memory high watermark on RabbitMQ to define [memory thresholds](https://www.rabbitmq.com/memory.html#threshold) using the `memoryHighWatermark.*` parameters. To do so, you have two alternatives:

* Set an absolute limit of RAM to be used on each RabbitMQ node, as shown in the configuration example below:

```
memoryHighWatermark.enabled="true"
memoryHighWatermark.type="absolute"
memoryHighWatermark.value="512MB"
```

* Set a relative limit of RAM to be used on each RabbitMQ node. To enable this feature, define the memory limits at pod level too. An example configuration is shown below:

```
memoryHighWatermark.enabled="true"
memoryHighWatermark.type="relative"
memoryHighWatermark.value="0.4"
resources.limits.memory="2Gi"
```
### Add extra environment variables

In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property.

```yaml
extraEnvVars:
  - name: LOG_LEVEL
    value: error
```

Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` properties, as in the sketch below.
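
For instance, assuming a ConfigMap and a Secret with the variables already exist under the (illustrative) names below:

```yaml
extraEnvVarsCM: rabbitmq-extra-env         # illustrative ConfigMap name
extraEnvVarsSecret: rabbitmq-extra-secret  # illustrative Secret name
```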
### Use plugins

The Bitnami Docker RabbitMQ image ships a set of plugins by default. By default, this chart enables `rabbitmq_management` and `rabbitmq_peer_discovery_k8s` since they are required for RabbitMQ to work on K8s.

To enable extra plugins, set the `extraPlugins` parameter with the list of plugins you want to enable, as in the sketch below. In addition to this, the `communityPlugins` parameter can be used to specify a list of URLs (separated by spaces) for custom plugins for RabbitMQ.
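
A values sketch enabling two extra shipped plugins and one community plugin (the plugin URL is illustrative):

```yaml
extraPlugins: "rabbitmq_shovel rabbitmq_shovel_management"
communityPlugins: "https://example.com/rabbitmq-plugin-v1.0.ez"  # illustrative URL
```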
Refer to the chart documentation for [more information on using RabbitMQ plugins](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/use-plugins/).
### Recover the cluster from complete shutdown

> IMPORTANT: Some of these procedures can lead to data loss. Always make a backup beforehand.

The RabbitMQ cluster is able to support multiple node failures but, in a situation in which all the nodes are brought down at the same time, the cluster might not be able to self-recover.

This happens if the pod management policy of the statefulset is not `Parallel` and the last pod to be running wasn't the first pod of the statefulset. If that happens, update the pod management policy to recover a healthy state:

```console
$ kubectl delete statefulset STATEFULSET_NAME --cascade=false
$ helm upgrade RELEASE_NAME bitnami/rabbitmq \
    --set podManagementPolicy=Parallel \
    --set replicaCount=NUMBER_OF_REPLICAS \
    --set auth.password=PASSWORD \
    --set auth.erlangCookie=ERLANG_COOKIE
```

For a faster resynchronization of the nodes, you can temporarily disable the readiness probe by setting `readinessProbe.enabled=false`. Bear in mind that the pods will be exposed before they are actually ready to process requests.

If the steps above don't bring the cluster to a healthy state, it could be possible that none of the RabbitMQ nodes think they were the last node to be up during the shutdown. In those cases, you can force the boot of the nodes by specifying the `clustering.forceBoot=true` parameter (which will execute [`rabbitmqctl force_boot`](https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot) in each pod):

```console
$ helm upgrade RELEASE_NAME bitnami/rabbitmq \
    --set podManagementPolicy=Parallel \
    --set clustering.forceBoot=true \
    --set replicaCount=NUMBER_OF_REPLICAS \
    --set auth.password=PASSWORD \
    --set auth.erlangCookie=ERLANG_COOKIE
```

More information: [Clustering Guide: Restarting](https://www.rabbitmq.com/clustering.html#restarting).
### Known issues

- Changing the password through RabbitMQ's UI can make the pod fail due to the default liveness probes. If you do so, remember to make the chart aware of the new password. Updating the default secret with the password you set through RabbitMQ's UI will automatically recreate the pods. If you are using your own secret, you may have to manually recreate the pods.
## Persistence

The [Bitnami RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) image stores the RabbitMQ data and configurations at the `/opt/bitnami/rabbitmq/var/lib/rabbitmq/` path of the container.

The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. By default, the volume is created using dynamic volume provisioning, which can be tuned as in the sketch below. An existing PersistentVolumeClaim can also be defined.
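
A values sketch tuning the persistence settings (the size and class shown are illustrative):

```yaml
persistence:
  enabled: true
  storageClass: ""   # empty string selects the cluster's default provisioner
  size: 16Gi         # illustrative volume size
```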
### Use existing PersistentVolumeClaims

1. Create the PersistentVolume
1. Create the PersistentVolumeClaim
1. Install the chart

```bash
$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/rabbitmq
```
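
For step 2, a minimal PersistentVolumeClaim sketch; the claim name is illustrative and is the value to pass as `PVC_NAME` above:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rabbitmq-data   # illustrative name; pass it via persistence.existingClaim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi
```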
### Adjust permissions of the persistence volume mountpoint

As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.

By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
As an alternative, this chart supports using an `initContainer` to change the ownership of the volume before mounting it in the final destination.

You can enable this `initContainer` by setting `volumePermissions.enabled` to `true`, as in the sketch below.
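
A values sketch combining both mechanisms, matching the chart defaults for the security context:

```yaml
volumePermissions:
  enabled: true   # init container chowns the volume to runAsUser:fsGroup
podSecurityContext:
  enabled: true
  fsGroup: 1001
  runAsUser: 1001
```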
### Configure the default user/vhost

If you want to create a default user/vhost and set the default permissions, you can use `extraConfiguration`:

```yaml
auth:
  username: default-user
extraConfiguration: |-
  default_vhost = default-vhost
  default_permissions.configure = .*
  default_permissions.read = .*
  default_permissions.write = .*
```
## Troubleshooting

Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
## Upgrading

It's necessary to set the `auth.password` and `auth.erlangCookie` parameters when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password and the cookie, and run the command below to upgrade your chart:

```bash
$ helm upgrade my-release bitnami/rabbitmq --set auth.password=[PASSWORD] --set auth.erlangCookie=[RABBITMQ_ERLANG_COOKIE]
```

> Note: you need to substitute the placeholders [PASSWORD] and [RABBITMQ_ERLANG_COOKIE] with the values obtained in the installation notes.
### To 8.21.0

This new version of the chart bumps the RabbitMQ version to `3.9.1`. It is considered a minor release, and no breaking changes are expected. Additionally, RabbitMQ `3.9.X` nodes can run alongside `3.8.X` nodes.

See the [Upgrading guide](https://www.rabbitmq.com/upgrade.html) and the [RabbitMQ change log](https://www.rabbitmq.com/changelog.html) for further documentation.

### To 8.0.0

[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.

[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/upgrade-helm3/).
### To 7.0.0

- Several parameters were renamed or disappeared in favor of new ones on this major version:
  - `replicas` is renamed to `replicaCount`.
  - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`.
  - Authentication parameters were reorganized under the `auth.*` parameter:
    - `rabbitmq.username`, `rabbitmq.password`, and `rabbitmq.erlangCookie` are now `auth.username`, `auth.password`, and `auth.erlangCookie` respectively.
    - `rabbitmq.tls.*` parameters are now under `auth.tls.*`.
  - Parameters prefixed with `rabbitmq.` were renamed removing the prefix. E.g. `rabbitmq.configuration` -> renamed to `configuration`.
  - `rabbitmq.rabbitmqClusterNodeName` is deprecated.
  - `rabbitmq.setUlimitNofiles` is deprecated.
  - `forceBoot.enabled` is renamed to `clustering.forceBoot`.
  - `loadDefinition.secretName` is renamed to `loadDefinition.existingSecret`.
  - `metrics.port` is renamed to `service.metricsPort`.
  - `service.extraContainerPorts` is renamed to `extraContainerPorts`.
  - `service.nodeTlsPort` is renamed to `service.tlsNodePort`.
  - `podDisruptionBudget` is deprecated in favor of `pdb.create`, `pdb.minAvailable`, and `pdb.maxUnavailable`.
  - `rbacEnabled` -> deprecated in favor of `rbac.create`.
  - New parameters: `serviceAccount.create`, and `serviceAccount.name`.
  - New parameters: `memoryHighWatermark.enabled`, `memoryHighWatermark.type`, and `memoryHighWatermark.value`.
- Chart labels and Ingress configuration were adapted to follow the Helm charts best practices.
- Initialization logic now relies on the container.
- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.

Consequences:

- Backwards compatibility is not guaranteed.
- Compatibility with non-Bitnami images is no longer guaranteed.
### To 6.0.0

This new version updates the RabbitMQ image to a [new version based on bash instead of node.js](https://github.com/bitnami/bitnami-docker-rabbitmq#3715-r18-3715-ol-7-r19). However, since this Chart overwrites the container's command, the changes to the container shouldn't affect the Chart. To upgrade, it may be needed to enable the `fastBoot` option, as is already the case when upgrading from 5.X to 5.Y.

### To 5.0.0

This major release changes the clustering method from `ip` to `hostname`.
This change is needed to fix the persistence. The data dir will now depend on the hostname, which is stable, instead of the pod IP, which might change.

> IMPORTANT: Note that if you upgrade from a previous version you will lose your data.
### To 3.0.0

Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is rabbitmq:

```console
$ kubectl delete statefulset rabbitmq --cascade=false
```
## Bitnami Kubernetes Documentation

Bitnami Kubernetes documentation is available at [https://docs.bitnami.com/](https://docs.bitnami.com/). There you can find the following resources:

- [Documentation for the RabbitMQ Helm chart](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/)
- [Get Started with Kubernetes guides](https://docs.bitnami.com/kubernetes/)
- [Bitnami Helm charts documentation](https://docs.bitnami.com/kubernetes/apps/)
- [Kubernetes FAQs](https://docs.bitnami.com/kubernetes/faq/)
- [Kubernetes Developer guides](https://docs.bitnami.com/tutorials/)
## License

Copyright © 2022 Bitnami

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@@ -0,0 +1,163 @@
CHART NAME: {{ .Chart.Name }}
CHART VERSION: {{ .Chart.Version }}
APP VERSION: {{ .Chart.AppVersion }}

{{- $servicePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.port .Values.service.tlsPort -}}
{{- $serviceNodePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.nodePort .Values.service.tlsNodePort -}}
** Please be patient while the chart is being deployed **

{{- if .Values.diagnosticMode.enabled }}
The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:

  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}

Get the list of pods by executing:

    kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}

Access the pod you want to debug by executing

    kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash

In order to replicate the container startup scripts execute this command:

    /opt/bitnami/scripts/rabbitmq/entrypoint.sh /opt/bitnami/scripts/rabbitmq/run.sh

{{- else }}

Credentials:

{{- if not .Values.loadDefinition.enabled }}
    echo "Username : {{ .Values.auth.username }}"
    echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretPasswordName" . }} -o jsonpath="{.data.rabbitmq-password}" | base64 --decode)"
{{- end }}
    echo "ErLang Cookie : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretErlangName" . }} -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 --decode)"

Note that the credentials are saved in persistent volume claims and will not be changed upon upgrade or reinstallation unless the persistent volume claim has been deleted. If this is not the first installation of this chart, the credentials may not be valid.
This is applicable when no passwords are set and therefore the random password is autogenerated. In case of using a fixed password, you should specify it when upgrading.
More information about the credentials may be found at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases.

RabbitMQ can be accessed within the cluster on port {{ $serviceNodePort }} at {{ include "rabbitmq.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clustering.k8s_domain }}

To access it from outside the cluster, perform the following steps:

{{- if .Values.ingress.enabled }}
{{- if contains "NodePort" .Values.service.type }}

To Access the RabbitMQ AMQP port:

1. Obtain the NodePort IP and ports:

    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
    export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }})
    echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/"

{{- else if contains "LoadBalancer" .Values.service.type }}

To Access the RabbitMQ AMQP port:

1. Obtain the LoadBalancer IP:

NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}'

    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
    echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/"

{{- else if contains "ClusterIP" .Values.service.type }}

To Access the RabbitMQ AMQP port:

1. Create a port-forward to the AMQP port:

    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }} &
    echo "URL : amqp://127.0.0.1:{{ $servicePort }}/"

{{- end }}

2. Access RabbitMQ using the obtained URL.

To Access the RabbitMQ Management interface:

1. Get the RabbitMQ Management URL and associate its hostname to your cluster external IP:

    export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on other K8s clusters
    echo "RabbitMQ Management: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/"
    echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts

2. Open a browser and access RabbitMQ Management using the obtained URL.

{{- else }}
{{- if contains "NodePort" .Values.service.type }}

Obtain the NodePort IP and ports:

    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
    export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }})
    export NODE_PORT_STATS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[3].nodePort}" services {{ include "rabbitmq.fullname" . }})

To Access the RabbitMQ AMQP port:

    echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/"

To Access the RabbitMQ Management interface:

    echo "URL : http://$NODE_IP:$NODE_PORT_STATS/"

{{- else if contains "LoadBalancer" .Values.service.type }}

Obtain the LoadBalancer IP:

NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}'

    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")

To Access the RabbitMQ AMQP port:

    echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/"

To Access the RabbitMQ Management interface:

    echo "URL : http://$SERVICE_IP:{{ .Values.service.managerPort }}/"

{{- else if contains "ClusterIP" .Values.service.type }}

To Access the RabbitMQ AMQP port:

    echo "URL : amqp://127.0.0.1:{{ $servicePort }}/"
    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }}

To Access the RabbitMQ Management interface:

    echo "URL : http://127.0.0.1:{{ .Values.service.managerPort }}/"
    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.managerPort }}:{{ .Values.service.managerPort }}

{{- end }}
{{- end }}

{{- if .Values.metrics.enabled }}

To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus URL by running:

    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.metricsPort }}:{{ .Values.service.metricsPort }} &
    echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.service.metricsPort }}/metrics"

Then, open the obtained URL in a browser.

{{- end }}

{{- include "common.warnings.rollingTag" .Values.image }}
{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
{{- include "rabbitmq.validateValues" . -}}

{{- $requiredPassword := list -}}
{{- $secretNameRabbitmq := include "rabbitmq.secretPasswordName" . -}}

{{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) -}}
{{- $requiredRabbitmqPassword := dict "valueKey" "auth.password" "secret" $secretNameRabbitmq "field" "rabbitmq-password" -}}
{{- $requiredPassword = append $requiredPassword $requiredRabbitmqPassword -}}
{{- end -}}

{{- end }}
@@ -0,0 +1,257 @@
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "rabbitmq.name" -}}
|
||||
{{- include "common.names.name" . -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "rabbitmq.fullname" -}}
|
||||
{{- include "common.names.fullname" . -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper RabbitMQ image name
|
||||
*/}}
|
||||
{{- define "rabbitmq.image" -}}
|
||||
{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper image name (for the init container volume-permissions image)
|
||||
*/}}
|
||||
{{- define "rabbitmq.volumePermissions.image" -}}
|
||||
{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper Docker Image Registry Secret Names
|
||||
*/}}
|
||||
{{- define "rabbitmq.imagePullSecrets" -}}
|
||||
{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return podAnnotations
|
||||
*/}}
|
||||
{{- define "rabbitmq.podAnnotations" -}}
|
||||
{{- if .Values.podAnnotations }}
|
||||
{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }}
|
||||
{{- end }}
|
||||
{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }}
|
||||
{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "rabbitmq.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
{{ default (include "rabbitmq.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Get the password secret.
|
||||
*/}}
|
||||
{{- define "rabbitmq.secretPasswordName" -}}
|
||||
{{- if .Values.auth.existingPasswordSecret -}}
|
||||
{{- printf "%s" (tpl .Values.auth.existingPasswordSecret $) -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s" (include "rabbitmq.fullname" .) -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Get the erlang secret.
|
||||
*/}}
|
||||
{{- define "rabbitmq.secretErlangName" -}}
|
||||
{{- if .Values.auth.existingErlangSecret -}}
|
||||
{{- printf "%s" (tpl .Values.auth.existingErlangSecret $) -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s" (include "rabbitmq.fullname" .) -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Get the TLS secret.
|
||||
*/}}
|
||||
{{- define "rabbitmq.tlsSecretName" -}}
|
||||
{{- if .Values.auth.tls.existingSecret -}}
|
||||
{{- printf "%s" (tpl .Values.auth.tls.existingSecret $) -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-certs" (include "rabbitmq.fullname" .) -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return true if a TLS credentials secret object should be created
|
||||
*/}}
|
||||
{{- define "rabbitmq.createTlsSecret" -}}
|
||||
{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.existingSecret) }}
|
||||
{{- true -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the proper RabbitMQ plugin list
|
||||
*/}}
|
||||
{{- define "rabbitmq.plugins" -}}
|
||||
{{- $plugins := .Values.plugins -}}
|
||||
{{- if .Values.extraPlugins -}}
|
||||
{{- $plugins = printf "%s %s" $plugins .Values.extraPlugins -}}
|
||||
{{- end -}}
|
||||
{{- if .Values.metrics.enabled -}}
|
||||
{{- $plugins = printf "%s %s" $plugins .Values.metrics.plugins -}}
|
||||
{{- end -}}
|
||||
{{- printf "%s" $plugins | replace " " ", " -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the number of bytes given a value
|
||||
following a base 2 o base 10 number system.
|
||||
Usage:
|
||||
{{ include "rabbitmq.toBytes" .Values.path.to.the.Value }}
|
||||
*/}}
|
||||
{{- define "rabbitmq.toBytes" -}}
|
||||
{{- $value := int (regexReplaceAll "([0-9]+).*" . "${1}") }}
|
||||
{{- $unit := regexReplaceAll "[0-9]+(.*)" . "${1}" }}
|
||||
{{- if eq $unit "Ki" }}
|
||||
{{- mul $value 1024 }}
|
||||
{{- else if eq $unit "Mi" }}
|
||||
{{- mul $value 1024 1024 }}
|
||||
{{- else if eq $unit "Gi" }}
|
||||
{{- mul $value 1024 1024 1024 }}
|
||||
{{- else if eq $unit "Ti" }}
|
||||
{{- mul $value 1024 1024 1024 1024 }}
|
||||
{{- else if eq $unit "Pi" }}
|
||||
{{- mul $value 1024 1024 1024 1024 1024 }}
|
||||
{{- else if eq $unit "Ei" }}
|
||||
{{- mul $value 1024 1024 1024 1024 1024 1024 }}
|
||||
{{- else if eq $unit "K" }}
|
||||
{{- mul $value 1000 }}
|
||||
{{- else if eq $unit "M" }}
|
||||
{{- mul $value 1000 1000 }}
|
||||
{{- else if eq $unit "G" }}
|
||||
{{- mul $value 1000 1000 1000 }}
|
||||
{{- else if eq $unit "T" }}
|
||||
{{- mul $value 1000 1000 1000 1000 }}
|
||||
{{- else if eq $unit "P" }}
|
||||
{{- mul $value 1000 1000 1000 1000 1000 }}
|
||||
{{- else if eq $unit "E" }}
|
||||
{{- mul $value 1000 1000 1000 1000 1000 1000 }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return true if cert-manager required annotations for TLS signed certificates are set in the Ingress annotations
|
||||
Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
|
||||
*/}}
|
||||
{{- define "rabbitmq.ingress.certManagerRequest" -}}
|
||||
{{ if or (hasKey . "cert-manager.io/cluster-issuer") (hasKey . "cert-manager.io/issuer") }}
|
||||
{{- true -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Compile all warnings into a single message, and call fail.
|
||||
*/}}
|
||||
{{- define "rabbitmq.validateValues" -}}
|
||||
{{- $messages := list -}}
|
||||
{{- $messages := append $messages (include "rabbitmq.validateValues.ldap" .) -}}
|
||||
{{- $messages := append $messages (include "rabbitmq.validateValues.memoryHighWatermark" .) -}}
|
||||
{{- $messages := append $messages (include "rabbitmq.validateValues.ingress.tls" .) -}}
|
||||
{{- $messages := append $messages (include "rabbitmq.validateValues.auth.tls" .) -}}
|
||||
{{- $messages := without $messages "" -}}
|
||||
{{- $message := join "\n" $messages -}}
|
||||
|
||||
{{- if $message -}}
|
||||
{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Validate values of rabbitmq - LDAP support
|
||||
*/}}
|
||||
{{- define "rabbitmq.validateValues.ldap" -}}
|
||||
{{- if .Values.ldap.enabled }}
|
||||
{{- $serversListLength := len .Values.ldap.servers }}
|
||||
{{- if or (not (gt $serversListLength 0)) (not (and .Values.ldap.port .Values.ldap.user_dn_pattern)) }}
|
||||
rabbitmq: LDAP
|
||||
Invalid LDAP configuration. When enabling LDAP support, the parameters "ldap.servers",
|
||||
"ldap.port", and "ldap. user_dn_pattern" are mandatory. Please provide them:
|
||||
|
||||
$ helm install {{ .Release.Name }} bitnami/rabbitmq \
|
||||
--set ldap.enabled=true \
|
||||
--set ldap.servers[0]="lmy-ldap-server" \
|
||||
--set ldap.port="389" \
|
||||
--set user_dn_pattern="cn=${username},dc=example,dc=org"
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Validate values of rabbitmq - Memory high watermark
|
||||
*/}}
|
||||
{{- define "rabbitmq.validateValues.memoryHighWatermark" -}}
|
||||
{{- if and (not (eq .Values.memoryHighWatermark.type "absolute")) (not (eq .Values.memoryHighWatermark.type "relative")) }}
|
||||
rabbitmq: memoryHighWatermark.type
|
||||
Invalid Memory high watermark type. Valid values are "absolute" and
|
||||
"relative". Please set a valid mode (--set memoryHighWatermark.type="xxxx")
|
||||
{{- else if and .Values.memoryHighWatermark.enabled (not .Values.resources.limits.memory) (eq .Values.memoryHighWatermark.type "relative") }}
|
||||
rabbitmq: memoryHighWatermark
|
||||
You enabled configuring memory high watermark using a relative limit. However,
|
||||
no memory limits were defined at POD level. Define your POD limits as shown below:
|
||||
|
||||
$ helm install {{ .Release.Name }} bitnami/rabbitmq \
|
||||
--set memoryHighWatermark.enabled=true \
|
||||
--set memoryHighWatermark.type="relative" \
|
||||
--set memoryHighWatermark.value="0.4" \
|
||||
--set resources.limits.memory="2Gi"
|
||||
|
||||
Altenatively, user an absolute value for the memory memory high watermark :
|
||||
|
||||
$ helm install {{ .Release.Name }} bitnami/rabbitmq \
|
||||
--set memoryHighWatermark.enabled=true \
|
||||
--set memoryHighWatermark.type="absolute" \
|
||||
--set memoryHighWatermark.value="512MB"
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Validate values of rabbitmq - TLS configuration for Ingress
|
||||
*/}}
|
||||
{{- define "rabbitmq.validateValues.ingress.tls" -}}
|
||||
{{- if and .Values.ingress.enabled .Values.ingress.tls (not (include "rabbitmq.ingress.certManagerRequest" .Values.ingress.annotations)) (not .Values.ingress.selfSigned) (empty .Values.ingress.extraTls) }}
|
||||
rabbitmq: ingress.tls
|
||||
You enabled the TLS configuration for the default ingress hostname but
|
||||
you did not enable any of the available mechanisms to create the TLS secret
|
||||
to be used by the Ingress Controller.
|
||||
Please use any of these alternatives:
|
||||
- Use the `ingress.extraTls` and `ingress.secrets` parameters to provide your custom TLS certificates.
|
||||
- Relay on cert-manager to create it by setting the corresponding annotations
|
||||
- Relay on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Validate values of RabbitMQ - Auth TLS enabled
|
||||
*/}}
|
||||
{{- define "rabbitmq.validateValues.auth.tls" -}}
|
||||
{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.autoGenerated) (not .Values.auth.tls.existingSecret) (not .Values.auth.tls.caCertificate) (not .Values.auth.tls.serverCertificate) (not .Values.auth.tls.serverKey) }}
|
||||
rabbitmq: auth.tls
|
||||
You enabled TLS for RabbitMQ but you did not enable any of the available mechanisms to create the TLS secret.
|
||||
Please use any of these alternatives:
|
||||
- Provide an existing secret containing the TLS certificates using `auth.tls.existingSecret`
|
||||
- Provide the plain text certificates using `auth.tls.caCertificate`, `auth.tls.serverCertificate` and `auth.tls.serverKey`.
|
||||
- Enable auto-generated certificates using `auth.tls.autoGenerated`.
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
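Taken together, these `rabbitmq.validateValues.*` helpers abort rendering when an inconsistent combination of values is supplied, so mistakes surface at install time rather than at pod startup. A quick way to exercise them from the shell is sketched below; the release name `helm-rabbitmq`, the chart path `./rabbitmq`, and the LDAP host are placeholders, and `user_dn_pattern` is single-quoted so the shell does not expand `${username}`:

#!/usr/bin/env bash
# Render locally; an LDAP config missing servers/port/user_dn_pattern fails fast.
helm template helm-rabbitmq ./rabbitmq \
  --set ldap.enabled=true \
  --set ldap.servers[0]="my-ldap-server" \
  --set ldap.port="389" \
  --set 'ldap.user_dn_pattern=cn=${username},dc=example,dc=org'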
@@ -0,0 +1,18 @@
{{- if .Values.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "rabbitmq.fullname" . }}-config
  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
data:
  rabbitmq.conf: |-
    {{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | nindent 4 }}
  {{- if .Values.advancedConfiguration }}
  advanced.config: |-
    {{- include "common.tplvalues.render" (dict "value" .Values.advancedConfiguration "context" $) | nindent 4 }}
  {{- end }}
{{- end }}
@@ -0,0 +1,4 @@
{{- range .Values.extraDeploy }}
---
{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
{{- end }}
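The `extraDeploy` loop above renders any additional manifests carried in the values file alongside the chart's own resources. A minimal sketch, assuming the chart lives at `./rabbitmq` and the release is named `helm-rabbitmq`:

#!/usr/bin/env bash
# Attach an extra ConfigMap to the release through extraDeploy.
cat > /tmp/extra-values.yaml << 'EOF'
extraDeploy:
  - apiVersion: v1
    kind: ConfigMap
    metadata:
      name: rabbitmq-extra-cm
    data:
      note: "rendered via extraDeploy"
EOF
helm template helm-rabbitmq ./rabbitmq -f /tmp/extra-values.yaml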
@@ -0,0 +1,59 @@
{{- if .Values.enabled }}
{{- if .Values.ingress.enabled }}
apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
kind: Ingress
metadata:
  name: {{ include "rabbitmq.fullname" . }}
  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
  annotations:
    {{- if .Values.ingress.certManager }}
    kubernetes.io/tls-acme: "true"
    {{- end }}
    {{- if .Values.commonAnnotations }}
    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
    {{- end }}
    {{- if .Values.ingress.annotations }}
    {{- include "common.tplvalues.render" (dict "value" .Values.ingress.annotations "context" $) | nindent 4 }}
    {{- end }}
spec:
  {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }}
  ingressClassName: {{ .Values.ingress.ingressClassName | quote }}
  {{- end }}
  rules:
    {{- if .Values.ingress.domain }}
    - host: {{ include "common.tplvalues.render" ( dict "value" .Values.ingress.domain "context" $ ) }}
      http:
        paths:
          {{- if .Values.ingress.extraPaths }}
          {{- toYaml .Values.ingress.extraPaths | nindent 10 }}
          {{- end }}
          - path: {{ .Values.ingress.path }}
            {{- if eq "true" (include "common.ingress.supportsPathType" .) }}
            pathType: {{ .Values.ingress.pathType }}
            {{- end }}
            backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) "servicePort" .Values.service.managerPortName "context" $) | nindent 14 }}
    {{- end }}
    {{- range .Values.ingress.extraHosts }}
    - host: {{ include "common.tplvalues.render" ( dict "value" .name "context" $ ) }}
      http:
        paths:
          - path: {{ default "/" .path }}
            {{- if eq "true" (include "common.ingress.supportsPathType" $) }}
            pathType: {{ default "ImplementationSpecific" .pathType }}
            {{- end }}
            backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http-stats" "context" $) | nindent 14 }}
    {{- end }}
  {{- if or (and .Values.ingress.tls (or (include "rabbitmq.ingress.certManagerRequest" .Values.ingress.annotations) .Values.ingress.selfSigned)) .Values.ingress.extraTls }}
  tls:
    {{- if and .Values.ingress.tls (or (include "rabbitmq.ingress.certManagerRequest" .Values.ingress.annotations) .Values.ingress.selfSigned) }}
    - hosts:
        - {{ .Values.ingress.domain | quote }}
      secretName: {{ printf "%s-tls" .Values.ingress.domain }}
    {{- end }}
    {{- if .Values.ingress.extraTls }}
    {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }}
    {{- end }}
  {{- end }}
{{- end }}
{{- end }}
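Combined with the self-signed branch of the TLS secret template further down, this ingress can be brought up without an external CA. A hedged example (release name, chart path, and domain are placeholders, not values from this repo):

#!/usr/bin/env bash
# Expose the management UI through the ingress with a Helm-generated certificate.
helm upgrade --install helm-rabbitmq ./rabbitmq \
  --set ingress.enabled=true \
  --set ingress.domain="rabbitmq.example.internal" \
  --set ingress.tls=true \
  --set ingress.selfSigned=true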
@@ -0,0 +1,39 @@
{{- if .Values.enabled }}
{{- if .Values.networkPolicy.enabled }}
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: {{ include "rabbitmq.fullname" . }}
  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
spec:
  podSelector:
    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
  ingress:
    # Allow inbound connections
    - ports:
        - port: 4369 # EPMD
        - port: {{ .Values.service.port }}
        - port: {{ .Values.service.tlsPort }}
        - port: {{ .Values.service.distPort }}
        - port: {{ .Values.service.managerPort }}
      {{- if not .Values.networkPolicy.allowExternal }}
      from:
        - podSelector:
            matchLabels:
              {{ template "rabbitmq.fullname" . }}-client: "true"
        - podSelector:
            matchLabels:
              {{- include "common.labels.matchLabels" . | nindent 14 }}
        {{- if .Values.networkPolicy.additionalRules }}
        {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.additionalRules "context" $) | nindent 8 }}
        {{- end }}
      {{- end }}
    # Allow prometheus scrapes
    - ports:
        - port: {{ .Values.service.metricsPort }}
{{- end }}
{{- end }}
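When `networkPolicy.allowExternal` is false, only pods labeled `<fullname>-client: "true"` (besides the chart's own pods) can reach the broker ports. A sketch, assuming the rendered fullname is `helm-rabbitmq` and the client pod is called `my-producer` (both placeholders):

#!/usr/bin/env bash
# Grant an application pod access through the NetworkPolicy.
kubectl label pod my-producer helm-rabbitmq-client=true
kubectl get networkpolicy helm-rabbitmq -o yaml | grep -A3 podSelector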
@@ -0,0 +1,22 @@
{{- if .Values.enabled }}
{{- if .Values.pdb.create }}
apiVersion: {{ include "common.capabilities.policy.apiVersion" . }}
kind: PodDisruptionBudget
metadata:
  name: {{ include "rabbitmq.fullname" . }}
  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.pdb.minAvailable }}
  minAvailable: {{ .Values.pdb.minAvailable }}
  {{- end }}
  {{- if .Values.pdb.maxUnavailable }}
  maxUnavailable: {{ .Values.pdb.maxUnavailable }}
  {{- end }}
  selector:
    matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }}
{{- end }}
{{- end }}
@@ -0,0 +1,26 @@
{{- if .Values.enabled }}
{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: {{ include "rabbitmq.fullname" . }}
  {{- if .Values.metrics.prometheusRule.namespace }}
  namespace: {{ .Values.metrics.prometheusRule.namespace }}
  {{- else }}
  namespace: {{ .Release.Namespace | quote }}
  {{- end }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
    {{- if .Values.metrics.prometheusRule.additionalLabels }}
    {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }}
    {{- end }}
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
spec:
  groups:
    {{- with .Values.metrics.prometheusRule.rules }}
    - name: {{ template "rabbitmq.name" $ }}
      rules: {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 8 }}
    {{- end }}
{{- end }}
{{- end }}
@@ -0,0 +1,21 @@
{{- if .Values.enabled }}
{{- if .Values.rbac.create }}
kind: Role
apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
metadata:
  name: {{ template "rabbitmq.fullname" . }}-endpoint-reader
  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create"]
{{- end }}

{{- end }}
@@ -0,0 +1,20 @@
{{- if .Values.enabled }}
{{- if and .Values.serviceAccount.create .Values.rbac.create }}
kind: RoleBinding
apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
metadata:
  name: {{ template "rabbitmq.fullname" . }}-endpoint-reader
  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
subjects:
  - kind: ServiceAccount
    name: {{ template "rabbitmq.serviceAccountName" . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ template "rabbitmq.fullname" . }}-endpoint-reader
{{- end }}
{{- end }}
@@ -0,0 +1,48 @@
{{- if .Values.enabled }}
{{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "rabbitmq.fullname" . }}
  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
type: Opaque
data:
  {{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) }}
  {{- if .Values.auth.password }}
  rabbitmq-password: {{ .Values.auth.password | b64enc | quote }}
  {{- else }}
  rabbitmq-password: {{ randAlphaNum 10 | b64enc | quote }}
  {{- end }}
  {{- end }}
  {{- if not .Values.auth.existingErlangSecret }}
  {{- if .Values.auth.erlangCookie }}
  rabbitmq-erlang-cookie: {{ .Values.auth.erlangCookie | b64enc | quote }}
  {{- else }}
  rabbitmq-erlang-cookie: {{ randAlphaNum 32 | b64enc | quote }}
  {{- end }}
  {{- end }}
{{- end }}
{{- $extraSecretsPrependReleaseName := .Values.extraSecretsPrependReleaseName }}
{{- range $key, $value := .Values.extraSecrets }}
---
apiVersion: v1
kind: Secret
metadata:
  {{- if $extraSecretsPrependReleaseName }}
  name: {{ $.Release.Name }}-{{ $key }}
  {{- else }}
  name: {{ $key }}
  {{- end }}
  namespace: {{ $.Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" $ | nindent 4 }}
  {{- if $.Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
type: Opaque
stringData: {{- include "common.tplvalues.render" (dict "value" $value "context" $) | nindent 2 }}
{{- end }}
{{- end }}
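Because the password and Erlang cookie fall back to `randAlphaNum`, they should be read back from the Secret after the first install. A sketch, assuming the rendered Secret name equals the release name `helm-rabbitmq` in namespace `middleware` (both placeholders):

#!/usr/bin/env bash
# Recover the generated credentials (base64-decoded).
kubectl get secret -n middleware helm-rabbitmq -o jsonpath='{.data.rabbitmq-password}' | base64 -d; echo
kubectl get secret -n middleware helm-rabbitmq -o jsonpath='{.data.rabbitmq-erlang-cookie}' | base64 -d; echo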
@@ -0,0 +1,17 @@
{{- if .Values.enabled }}
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "rabbitmq.serviceAccountName" . }}
  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
secrets:
  - name: {{ include "rabbitmq.fullname" . }}
{{- end }}
{{- end }}
@@ -0,0 +1,56 @@
{{- if .Values.enabled }}
{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "rabbitmq.fullname" . }}
  {{- if .Values.metrics.serviceMonitor.namespace }}
  namespace: {{ .Values.metrics.serviceMonitor.namespace }}
  {{- else }}
  namespace: {{ .Release.Namespace | quote }}
  {{- end }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
    {{- if .Values.metrics.serviceMonitor.additionalLabels }}
    {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }}
    {{- end }}
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
spec:
  endpoints:
    - port: metrics
      {{- if .Values.metrics.serviceMonitor.interval }}
      interval: {{ .Values.metrics.serviceMonitor.interval }}
      {{- end }}
      {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
      scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
      {{- end }}
      {{- if .Values.metrics.serviceMonitor.honorLabels }}
      honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
      {{- end }}
      {{- if .Values.metrics.serviceMonitor.relabelings }}
      relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabelings | nindent 6 }}
      {{- end }}
      {{- if .Values.metrics.serviceMonitor.relabellings }}
      metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }}
      {{- else if .Values.metrics.serviceMonitor.metricRelabelings }}
      metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }}
      {{- end }}
      {{- if .Values.metrics.serviceMonitor.path }}
      path: {{ .Values.metrics.serviceMonitor.path }}
      {{- end }}
  namespaceSelector:
    matchNames:
      - {{ .Release.Namespace | quote }}
  {{- with .Values.metrics.serviceMonitor.podTargetLabels }}
  podTargetLabels:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  {{- with .Values.metrics.serviceMonitor.targetLabels }}
  targetLabels:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  selector:
    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
{{- end }}
{{- end }}
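To feed an existing Prometheus Operator, metrics and the ServiceMonitor are switched on together; whether Prometheus actually picks the object up depends on its own label selectors, which this chart cannot know. Release name and chart path below are placeholders:

#!/usr/bin/env bash
# Enable the metrics port and create a ServiceMonitor scraping every 30s.
helm upgrade --install helm-rabbitmq ./rabbitmq \
  --set metrics.enabled=true \
  --set metrics.serviceMonitor.enabled=true \
  --set metrics.serviceMonitor.interval=30s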
@@ -0,0 +1,388 @@
{{- if .Values.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ include "rabbitmq.fullname" . }}
  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
    {{- if .Values.statefulsetLabels }}
    {{- include "common.tplvalues.render" (dict "value" .Values.statefulsetLabels "context" $) | nindent 4 }}
    {{- end }}
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
spec:
  serviceName: {{ template "rabbitmq.fullname" . }}-headless
  podManagementPolicy: {{ .Values.podManagementPolicy }}
  replicas: {{ .Values.replicaCount }}
  updateStrategy:
    type: {{ .Values.updateStrategyType }}
    {{- if (eq "OnDelete" .Values.updateStrategyType) }}
    rollingUpdate: null
    {{- end }}
  selector:
    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
  template:
    metadata:
      labels: {{- include "common.labels.standard" . | nindent 8 }}
        {{- if .Values.podLabels }}
        {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }}
        {{- end }}
      annotations:
        {{- if .Values.commonAnnotations }}
        {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 8 }}
        {{- end }}
        checksum/config: {{ include (print $.Template.BasePath "/configuration.yaml") . | sha256sum }}
        {{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) .Values.extraSecrets }}
        checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
        {{- end }}
        {{- if or .Values.podAnnotations .Values.metrics.enabled }}
        {{- include "rabbitmq.podAnnotations" . | nindent 8 }}
        {{- end }}
    spec:
      {{- include "rabbitmq.imagePullSecrets" . | nindent 6 }}
      {{- if .Values.schedulerName }}
      schedulerName: {{ .Values.schedulerName | quote }}
      {{- end }}
      serviceAccountName: {{ template "rabbitmq.serviceAccountName" . }}
      {{- if .Values.affinity }}
      affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" .) | nindent 8 }}
      {{- else }}
      affinity:
        {{- /* podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAffinityPreset "context" $) | nindent 10 -}}*/}}
        {{- /* podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.global.podAntiAffinityPreset "context" $) | nindent 10 }}*/}}
        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.global.nodeAffinityPreset.type "key" .Values.global.nodeAffinityPreset.key "values" .Values.global.nodeAffinityPreset.values) | nindent 10 -}}
      {{- end }}
      {{- if .Values.hostAliases }}
      hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }}
      {{- end }}
      {{- if .Values.nodeSelector }}
      nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" .) | nindent 8 }}
      {{- end }}
      {{- if .Values.tolerations }}
      tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }}
      {{- end }}
      {{- if .Values.topologySpreadConstraints }}
      topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }}
      {{- end }}
      {{- if .Values.priorityClassName }}
      priorityClassName: {{ .Values.priorityClassName }}
      {{- end }}
      {{- if .Values.podSecurityContext.enabled }}
      securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }}
      {{- end }}
      terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
      {{- if or (.Values.initContainers) (and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext) }}
      initContainers:
        {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext }}
        - name: volume-permissions
          {{- /* image: {{ include "rabbitmq.volumePermissions.image" . }}*/}}
          image: "{{ .Values.global.image.repository | default .Values.volumePermissions.image.repository }}/bitnami-shell:{{ .Values.volumePermissions.image.tag }}"
          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
          command:
            - /bin/bash
          args:
            - -ec
            - |
              mkdir -p "/bitnami/rabbitmq/mnesia"
              chown -R "{{ .Values.podSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "/bitnami/rabbitmq/mnesia"
          securityContext:
            runAsUser: 0
          {{- if .Values.volumePermissions.resources }}
          resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
          {{- end }}
          volumeMounts:
            - name: data
              mountPath: /bitnami/rabbitmq/mnesia
        {{- end }}
        {{- if .Values.initContainers }}
        {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }}
        {{- end }}
      {{- end }}
      containers:
        - name: rabbitmq
          {{- /* image: {{ template "rabbitmq.image" . }}*/}}
          image: "{{ .Values.global.image.repository | default .Values.image.repository }}/rabbitmq:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
          {{- if .Values.containerSecurityContext }}
          securityContext: {{- toYaml .Values.containerSecurityContext | nindent 12 }}
          {{- end }}
          {{- if .Values.diagnosticMode.enabled }}
          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
          {{- else if .Values.command }}
          command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }}
          {{- end }}
          {{- if .Values.diagnosticMode.enabled }}
          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
          {{- else if .Values.args }}
          args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }}
          {{- end }}
          env:
            - name: BITNAMI_DEBUG
              value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: K8S_SERVICE_NAME
              value: "{{ template "rabbitmq.fullname" . }}-headless"
            - name: K8S_ADDRESS_TYPE
              value: {{ .Values.clustering.addressType }}
            - name: RABBITMQ_FORCE_BOOT
              value: {{ ternary "yes" "no" .Values.clustering.forceBoot | quote }}
            {{- if (eq "hostname" .Values.clustering.addressType) }}
            - name: RABBITMQ_NODE_NAME
              value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}"
            - name: K8S_HOSTNAME_SUFFIX
              value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}"
            {{- else }}
            - name: RABBITMQ_NODE_NAME
              value: "rabbit@$(MY_POD_NAME)"
            {{- end }}
            - name: RABBITMQ_MNESIA_DIR
              value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
            - name: RABBITMQ_LDAP_ENABLE
              value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }}
            {{- if .Values.ldap.enabled }}
            - name: RABBITMQ_LDAP_TLS
              value: {{ ternary "yes" "no" .Values.ldap.tls.enabled | quote }}
            - name: RABBITMQ_LDAP_SERVERS
              value: {{ .Values.ldap.servers | join "," | quote }}
            - name: RABBITMQ_LDAP_SERVERS_PORT
              value: {{ .Values.ldap.port | quote }}
            - name: RABBITMQ_LDAP_USER_DN_PATTERN
              value: {{ .Values.ldap.user_dn_pattern }}
            {{- end }}
            - name: RABBITMQ_LOGS
              value: {{ .Values.logs | quote }}
            - name: RABBITMQ_ULIMIT_NOFILES
              value: {{ .Values.ulimitNofiles | quote }}
            {{- if .Values.maxAvailableSchedulers }}
            - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS
              value: {{ printf "+S %s:%s" (toString .Values.maxAvailableSchedulers) (toString .Values.onlineSchedulers) }}
            {{- end }}
            - name: RABBITMQ_USE_LONGNAME
              value: "true"
            - name: RABBITMQ_ERL_COOKIE
              valueFrom:
                secretKeyRef:
                  name: {{ template "rabbitmq.secretErlangName" . }}
                  key: rabbitmq-erlang-cookie
            {{- if .Values.loadDefinition.enabled }}
            - name: RABBITMQ_LOAD_DEFINITIONS
              value: "yes"
            - name: RABBITMQ_SECURE_PASSWORD
              value: "no"
            {{- else }}
            - name: RABBITMQ_LOAD_DEFINITIONS
              value: "no"
            - name: RABBITMQ_SECURE_PASSWORD
              value: "yes"
            - name: RABBITMQ_USERNAME
              value: {{ .Values.auth.username | quote }}
            - name: RABBITMQ_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ template "rabbitmq.secretPasswordName" . }}
                  key: rabbitmq-password
            {{- end }}
            - name: RABBITMQ_PLUGINS
              value: {{ include "rabbitmq.plugins" . | quote }}
            {{- if .Values.communityPlugins }}
            - name: RABBITMQ_COMMUNITY_PLUGINS
              value: {{ .Values.communityPlugins | quote }}
            {{- end }}
            {{- if .Values.extraEnvVars }}
            {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }}
            {{- end }}
          {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }}
          envFrom:
            {{- if .Values.extraEnvVarsCM }}
            - configMapRef:
                name: {{ tpl .Values.extraEnvVarsCM . | quote }}
            {{- end }}
            {{- if .Values.extraEnvVarsSecret }}
            - secretRef:
                name: {{ tpl .Values.extraEnvVarsSecret . | quote }}
            {{- end }}
          {{- end }}
          ports:
            {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }}
            - name: amqp
              containerPort: 5672
            {{- end }}
            {{- if .Values.auth.tls.enabled }}
            - name: amqp-ssl
              containerPort: {{ .Values.service.tlsPort }}
            {{- end }}
            - name: dist
              containerPort: 25672
            - name: dashboard
              containerPort: 15672
            - name: epmd
              containerPort: 4369
            {{- if .Values.metrics.enabled }}
            - name: metrics
              containerPort: 9419
            {{- end }}
            {{- if .Values.extraContainerPorts }}
            {{- toYaml .Values.extraContainerPorts | nindent 12 }}
            {{- end }}
          {{- if not .Values.diagnosticMode.enabled }}
          {{- if .Values.livenessProbe.enabled }}
          livenessProbe:
            exec:
              command:
                - /bin/bash
                - -ec
                - rabbitmq-diagnostics -q ping
            initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
            timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
            successThreshold: {{ .Values.livenessProbe.successThreshold }}
            failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
          {{- else if .Values.customLivenessProbe }}
          livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }}
          {{- end }}
          {{- if .Values.readinessProbe.enabled }}
          readinessProbe:
            exec:
              command:
                - /bin/bash
                - -ec
                - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
            initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
            timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
            successThreshold: {{ .Values.readinessProbe.successThreshold }}
            failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
          {{- else if .Values.customReadinessProbe }}
          readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }}
          {{- end }}
          {{- if .Values.customStartupProbe }}
          startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }}
          {{- end }}
          lifecycle:
            {{- if and .Values.clustering.rebalance (gt (.Values.replicaCount | int) 1) }}
            postStart:
              exec:
                command:
                  - /bin/bash
                  - -ec
                  - |
                    until rabbitmqctl cluster_status >/dev/null; do
                        echo "Waiting for cluster readiness..."
                        sleep 5
                    done
                    rabbitmq-queues rebalance "all"
            {{- end }}
            preStop:
              exec:
                command:
                  - /bin/bash
                  - -ec
                  - |
                    if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
                        /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t {{ .Values.terminationGracePeriodSeconds | quote }} -d {{ ternary "true" "false" .Values.image.debug | quote }}
                    else
                        rabbitmqctl stop_app
                    fi
          {{- end }}
          {{- if .Values.resources }}
          resources: {{- toYaml .Values.resources | nindent 12 }}
          {{- end }}
          volumeMounts:
            - name: configuration
              mountPath: /bitnami/rabbitmq/conf
            - name: data
              mountPath: /bitnami/rabbitmq/mnesia
            {{- if .Values.auth.tls.enabled }}
            - name: certs
              mountPath: /opt/bitnami/rabbitmq/certs
            {{- end }}
            {{- if .Values.loadDefinition.enabled }}
            - name: load-definition-volume
              mountPath: /app
              readOnly: true
            {{- end }}
            {{- if .Values.extraVolumeMounts }}
            {{- toYaml .Values.extraVolumeMounts | nindent 12 }}
            {{- end }}
        {{- if .Values.sidecars }}
        {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }}
        {{- end }}
      volumes:
        {{- if .Values.persistence.volumes }}
        {{- toYaml .Values.persistence.volumes | nindent 8 }}
        {{- end }}
        {{- if .Values.auth.tls.enabled }}
        - name: certs
          secret:
            secretName: {{ template "rabbitmq.tlsSecretName" . }}
            items:
              - key: {{ ternary "tls.crt" "ca.crt" .Values.auth.tls.existingSecretFullChain }}
                path: ca_certificate.pem
              - key: tls.crt
                path: server_certificate.pem
              - key: tls.key
                path: server_key.pem
        {{- end }}
        - name: configuration
          configMap:
            name: {{ template "rabbitmq.fullname" . }}-config
            items:
              - key: rabbitmq.conf
                path: rabbitmq.conf
              {{- if .Values.advancedConfiguration }}
              - key: advanced.config
                path: advanced.config
              {{- end }}
        {{- if .Values.loadDefinition.enabled }}
        - name: load-definition-volume
          secret:
            secretName: {{ tpl .Values.loadDefinition.existingSecret . | quote }}
        {{- end }}
        {{- if .Values.extraVolumes }}
        {{- toYaml .Values.extraVolumes | nindent 8 }}
        {{- end }}
  {{- if not (contains "data" (quote .Values.persistence.volumes)) }}
  {{- if not .Values.persistence.enabled }}
        - name: data
          emptyDir: {}
  {{- else if .Values.persistence.existingClaim }}
        - name: data
          persistentVolumeClaim:
            {{- with .Values.persistence.existingClaim }}
            claimName: {{ tpl . $ }}
            {{- end }}
  {{- else }}
  volumeClaimTemplates:
    - metadata:
        name: data
        labels: {{- include "common.labels.matchLabels" . | nindent 10 }}
        {{- if .Values.persistence.annotations }}
        annotations:
          {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.annotations "context" $) | nindent 10 }}
        {{- end }}
      spec:
        accessModes:
          - {{ .Values.persistence.accessMode | quote }}
        resources:
          requests:
            storage: {{ .Values.persistence.size | quote }}
        {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }}
        {{- if .Values.persistence.selector }}
        selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }}
        {{- end -}}
  {{- end }}
  {{- end }}

{{- end }}
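After the StatefulSet settles, cluster formation can be verified with the same diagnostics the probes use. The pod and namespace names below are placeholders derived from a release called `helm-rabbitmq`:

#!/usr/bin/env bash
# Check that all replicas joined the cluster and no alarms are raised.
kubectl exec -n middleware helm-rabbitmq-0 -- rabbitmqctl cluster_status
kubectl exec -n middleware helm-rabbitmq-0 -- rabbitmq-diagnostics -q check_local_alarms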
@@ -0,0 +1,45 @@
{{- if .Values.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "rabbitmq.fullname" . }}-headless
  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
  {{- if or (.Values.service.annotationsHeadless) (.Values.commonAnnotations) }}
  annotations:
    {{- if .Values.commonAnnotations }}
    {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }}
    {{- end -}}
    {{- if .Values.service.annotationsHeadless }}
    {{- include "common.tplvalues.render" (dict "value" .Values.service.annotationsHeadless "context" $) | nindent 4 }}
    {{- end -}}
  {{- end }}
spec:
  clusterIP: None
  ports:
    - name: {{ .Values.service.epmdPortName }}
      port: 4369
      targetPort: epmd
    {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }}
    - name: amqp
      port: {{ .Values.service.port }}
      targetPort: {{ .Values.service.portName }}
    {{- end }}
    {{- if .Values.auth.tls.enabled }}
    - name: {{ .Values.service.tlsPortName }}
      port: {{ .Values.service.tlsPort }}
      targetPort: amqp-ssl
    {{- end }}
    - name: {{ .Values.service.distPortName }}
      port: {{ .Values.service.distPort }}
      targetPort: dist
    {{- if .Values.service.managerPortEnabled }}
    - name: {{ .Values.service.managerPortName }}
      port: {{ .Values.service.managerPort }}
      targetPort: dashboard
    {{- end }}
  selector: {{ include "common.labels.matchLabels" . | nindent 4 }}
  publishNotReadyAddresses: true

{{- end }}
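The headless service exists to give each pod a stable DNS name of the form `<pod>.<fullname>-headless.<namespace>.svc.<clusterDomain>`, which is exactly what `RABBITMQ_NODE_NAME` in the StatefulSet relies on. A quick resolution check (service and namespace names are placeholders):

#!/usr/bin/env bash
kubectl run dns-test --rm -it --restart=Never --image=busybox:1.36 -- \
  nslookup helm-rabbitmq-headless.middleware.svc.cluster.local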
@@ -0,0 +1,97 @@
{{- if .Values.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "rabbitmq.fullname" . }}
  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
    {{- if .Values.service.labels }}
    {{- include "common.tplvalues.render" (dict "value" .Values.service.labels "context" $) | nindent 4 }}
    {{- end }}
  {{- if or (.Values.service.annotations) (.Values.commonAnnotations) }}
  annotations:
    {{- if .Values.commonAnnotations }}
    {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }}
    {{- end -}}
    {{- if .Values.service.annotations }}
    {{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }}
    {{- end -}}
  {{- end }}
spec:
  type: {{ .Values.global.middlewareService.type }}
  {{- if eq .Values.service.type "LoadBalancer" }}
  {{- if not (empty .Values.service.loadBalancerIP) }}
  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
  {{- end }}
  {{- if .Values.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }}
  {{- end }}
  {{- end }}
  {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }}
  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}
  {{- end }}
  {{- if .Values.service.externalIPs }}
  externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }}
  {{- end }}
  ports:
    {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }}
    - name: {{ .Values.service.portName }}
      port: {{ .Values.service.port }}
      targetPort: amqp
      {{- if (eq .Values.service.type "ClusterIP") }}
      nodePort: null
      {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.nodePort.rabbitmq)) }}
      nodePort: {{ .Values.nodePort.rabbitmq }}
      {{- end }}
    {{- end }}
    {{- if .Values.auth.tls.enabled }}
    - name: {{ .Values.service.tlsPortName }}
      port: {{ .Values.service.tlsPort }}
      targetPort: amqp-ssl
      {{- if (eq .Values.service.type "ClusterIP") }}
      nodePort: null
      {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.tlsNodePort)) }}
      nodePort: {{ .Values.service.tlsNodePort }}
      {{- end }}
    {{- end }}
    {{- /* - name: {{ .Values.service.epmdPortName }}*/}}
    {{- /*   port: 4369*/}}
    {{- /*   targetPort: epmd*/}}
    {{- /*   {{- if (eq .Values.service.type "ClusterIP") }}*/}}
    {{- /*   nodePort: null*/}}
    {{- /*   {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.epmdNodePort)) }}*/}}
    {{- /*   nodePort: {{ .Values.service.epmdNodePort }}*/}}
    {{- /*   {{- end }}*/}}
    {{- /* - name: {{ .Values.service.distPortName }}*/}}
    {{- /*   port: {{ .Values.service.distPort }}*/}}
    {{- /*   targetPort: dist*/}}
    {{- /*   {{- if eq .Values.service.type "ClusterIP" }}*/}}
    {{- /*   nodePort: null*/}}
    {{- /*   {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.distNodePort)) }}*/}}
    {{- /*   nodePort: {{ .Values.service.distNodePort }}*/}}
    {{- /*   {{- end }}*/}}
    {{- if .Values.service.managerPortEnabled }}
    - name: {{ .Values.service.managerPortName }}
      port: {{ .Values.service.managerPort }}
      targetPort: dashboard
      {{- if eq .Values.service.type "ClusterIP" }}
      nodePort: null
      {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.nodePort.dashboard)) }}
      nodePort: {{ .Values.nodePort.dashboard }}
      {{- end }}
    {{- end }}
    {{- if .Values.metrics.enabled }}
    - name: {{ .Values.service.metricsPortName }}
      port: {{ .Values.service.metricsPort }}
      targetPort: metrics
      {{- if eq .Values.service.type "ClusterIP" }}
      nodePort: null
      {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.metricsNodePort)) }}
      nodePort: {{ .Values.service.metricsNodePort }}
      {{- end }}
    {{- end }}
    {{- if .Values.service.extraPorts }}
    {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }}
    {{- end }}
  selector: {{ include "common.labels.matchLabels" . | nindent 4 }}
{{- end }}
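With `global.middlewareService.type: NodePort` (the default in this repo's values), the dashboard becomes reachable on every node at `nodePort.dashboard`. A sketch using 35675 from the all-middleware values; the jsonpath picks the first node's InternalIP:

#!/usr/bin/env bash
# Probe the management UI through a node address.
NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
curl -s -o /dev/null -w "%{http_code}\n" "http://${NODE_IP}:35675"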
@@ -0,0 +1,76 @@
{{- if .Values.enabled }}
{{- if .Values.ingress.enabled }}
{{- if .Values.ingress.secrets }}
{{- range .Values.ingress.secrets }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ .name }}
  namespace: {{ $.Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" $ | nindent 4 }}
    {{- if $.Values.commonLabels }}
    {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
    {{- end }}
  {{- if $.Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
type: kubernetes.io/tls
data:
  tls.crt: {{ .certificate | b64enc }}
  tls.key: {{ .key | b64enc }}
---
{{- end }}
{{- end }}
{{- if and .Values.ingress.tls .Values.ingress.selfSigned }}
{{- $ca := genCA "rabbitmq-ca" 365 }}
{{- $cert := genSignedCert .Values.ingress.domain nil (list .Values.ingress.domain) 365 $ca }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ printf "%s-tls" .Values.ingress.domain }}
  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
    {{- if .Values.commonLabels }}
    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
    {{- end }}
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
type: kubernetes.io/tls
data:
  tls.crt: {{ $cert.Cert | b64enc | quote }}
  tls.key: {{ $cert.Key | b64enc | quote }}
  ca.crt: {{ $ca.Cert | b64enc | quote }}
---
{{- end }}
{{- end }}
{{- if (include "rabbitmq.createTlsSecret" . ) }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "rabbitmq.fullname" . }}-certs
  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
type: kubernetes.io/tls
data:
  {{- if or (not .Values.auth.tls.autoGenerated ) (and .Values.auth.tls.caCertificate .Values.auth.tls.serverCertificate .Values.auth.tls.serverKey) }}
  ca.crt: {{ required "A valid .Values.auth.tls.caCertificate entry required!" .Values.auth.tls.caCertificate | b64enc | quote }}
  tls.crt: {{ required "A valid .Values.auth.tls.serverCertificate entry required!" .Values.auth.tls.serverCertificate | b64enc | quote }}
  tls.key: {{ required "A valid .Values.auth.tls.serverKey entry required!" .Values.auth.tls.serverKey | b64enc | quote }}
  {{- else }}
  {{- $ca := genCA "rabbitmq-internal-ca" 365 }}
  {{- $fullname := include "rabbitmq.fullname" . }}
  {{- $releaseNamespace := .Release.Namespace }}
  {{- $clusterDomain := .Values.clusterDomain }}
  {{- $serviceName := include "rabbitmq.fullname" . }}
  {{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }}
  {{- $crt := genSignedCert $fullname nil $altNames 365 $ca }}
  ca.crt: {{ $ca.Cert | b64enc | quote }}
  tls.crt: {{ $crt.Cert | b64enc | quote }}
  tls.key: {{ $crt.Key | b64enc | quote }}
  {{- end }}
{{- end }}
{{- end }}
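The auto-generated branch signs a certificate whose SANs cover the service DNS names, so in-cluster clients can verify it. The secret name and namespace below are placeholders following the `<fullname>-certs` pattern:

#!/usr/bin/env bash
# Inspect subject and SANs of the generated server certificate.
kubectl get secret -n middleware helm-rabbitmq-certs -o jsonpath='{.data.tls\.crt}' \
  | base64 -d | openssl x509 -noout -subject -ext subjectAltName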
@@ -0,0 +1,100 @@
{
  "$schema": "http://json-schema.org/schema#",
  "type": "object",
  "properties": {
    "auth": {
      "type": "object",
      "properties": {
        "username": {
          "type": "string",
          "title": "RabbitMQ user",
          "form": true
        },
        "password": {
          "type": "string",
          "title": "RabbitMQ password",
          "form": true,
          "description": "Defaults to a random 10-character alphanumeric string if not set"
        }
      }
    },
    "extraConfiguration": {
      "type": "string",
      "title": "Extra RabbitMQ Configuration",
      "form": true,
      "render": "textArea",
      "description": "Extra configuration to be appended to RabbitMQ Configuration"
    },
    "replicaCount": {
      "type": "integer",
      "form": true,
      "title": "Number of replicas",
      "description": "Number of replicas to deploy"
    },
    "persistence": {
      "type": "object",
      "title": "Persistence configuration",
      "form": true,
      "properties": {
        "enabled": {
          "type": "boolean",
          "form": true,
          "title": "Enable persistence",
          "description": "Enable persistence using Persistent Volume Claims"
        },
        "size": {
          "type": "string",
          "title": "Persistent Volume Size",
          "form": true,
          "render": "slider",
          "sliderMin": 1,
          "sliderMax": 100,
          "sliderUnit": "Gi",
          "hidden": {
            "value": false,
            "path": "persistence/enabled"
          }
        }
      }
    },
    "volumePermissions": {
      "type": "object",
      "properties": {
        "enabled": {
          "type": "boolean",
          "form": true,
          "title": "Enable Init Containers",
          "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination"
        }
      }
    },
    "metrics": {
      "type": "object",
      "form": true,
      "title": "Prometheus metrics details",
      "properties": {
        "enabled": {
          "type": "boolean",
          "title": "Enable Prometheus metrics for RabbitMQ",
          "description": "Install Prometheus plugin in the RabbitMQ container",
          "form": true
        },
        "serviceMonitor": {
          "type": "object",
          "properties": {
            "enabled": {
              "type": "boolean",
              "title": "Create Prometheus Operator ServiceMonitor",
              "description": "Create a ServiceMonitor to track metrics using Prometheus Operator",
              "form": true,
              "hidden": {
                "value": false,
                "path": "metrics/enabled"
              }
            }
          }
        }
      }
    }
  }
}
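Helm validates user-supplied values against this `values.schema.json` on install, upgrade, lint, and template, so a wrongly typed value is rejected before anything is rendered. For instance (the chart path is a placeholder):

#!/usr/bin/env bash
helm lint ./rabbitmq --set replicaCount=three   # rejected: expected integer
helm lint ./rabbitmq --set replicaCount=3       # passes schema validation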
File diff suppressed because it is too large
@@ -0,0 +1,34 @@
1. emqx
{{- if .Values.emqx.enabled -}}
{{ .Values.emqx.appName }} is created
export emqx_svc=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ .Values.emqx.appName }})
echo "${emqx_svc}"
{{- else -}}
{{ .Values.emqx.appName }} is skipped
{{- end }}

2. mongo
{{- if .Values.mongo.enabled -}}
{{ .Values.mongo.appName }} is created
export mongo_svc=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ .Values.mongo.appName }})
echo "${mongo_svc}"
{{- else -}}
{{ .Values.mongo.appName }} is skipped
{{- end }}

3. nacos
{{- if .Values.nacos.enabled -}}
{{ .Values.nacos.appName }} is created
export nacos_svc=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ .Values.nacos.appName }})
echo "${nacos_svc}"
{{- else -}}
{{ .Values.nacos.appName }} is skipped
{{- end }}

4. rabbitmq
{{- if .Values.rabbitmq.enabled -}}
{{ .Values.rabbitmq.appName }} is created
export rabbitmq_svc=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ .Values.rabbitmq.appName }})
echo "${rabbitmq_svc}"
{{- else -}}
{{ .Values.rabbitmq.appName }} is skipped
{{- end }}
@@ -0,0 +1,54 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Create chart name and version as used by the chart label.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "uavcloud-middleware.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 }}
{{- end }}

{{/*
Create the node affinity that determines which environment the release is deployed to
*/}}
{{- define "uavcloud-middleware.affinity" -}}
{{- with .Values.global.affinity }}
nodeAffinity:
  requiredDuringSchedulingIgnoredDuringExecution:
    nodeSelectorTerms:
      - matchExpressions:
          - key: uavcloud.affinity
            operator: In
            values:
              - {{ .k8sNodeEnv }}
{{- end }}
{{- end }}

{{/*
Create the TLS configuration for HTTPS to enable the Traefik ingress
*/}}
{{- define "uavcloud-middleware.trafik.tls" -}}
entryPoints:
  - websecure
tls:
  certResolver: default
{{- end }}


{{/*
Create the volumeClaimTemplates to use
*/}}
{{/*{{- define "uavcloud-middleware.global.volumeClaimTemplates" -}}*/}}
{{/*- metadata:*/}}
{{/*    name: {{ .Values.global.volume.name }}*/}}
{{/*    namespace: {{ .Release.Namespace }}*/}}
{{/*    labels:*/}}
{{/*      cmii.type: {{ .Values.global.application.type }}*/}}
{{/*      cmii.app: {{ .Values.global.volume.name }}*/}}
{{/*  spec:*/}}
{{/*    storageClassName: {{ .Values.global.storageClass.name }}*/}}
{{/*    accessModes: [ "{{ .Values.storageClass.accessMode }}" ]*/}}
{{/*    volumeMode: {{ .Values.storageClass.volumeMode }}*/}}
{{/*    resources:*/}}
{{/*      requests:*/}}
{{/*        storage: {{ .Values.storageClass.resources.requests.storage }}*/}}
{{/*{{- end }}*/}}
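The affinity helper above pins workloads to nodes labeled `uavcloud.affinity=<k8sNodeEnv>`, so nodes have to carry that label first; the node name `worker-1` and the value `demo` are examples only:

#!/usr/bin/env bash
kubectl label nodes worker-1 uavcloud.affinity=demo --overwrite
kubectl get nodes -L uavcloud.affinity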
104
3-湘潭钢铁项目/2-helm-chart/charts/all-middleware/values.yaml
Normal file
@@ -0,0 +1,104 @@
global:
  image:
    repository: harbor.cdcyy.com.cn/cmii # override all applications' image repository
    tag: "3.2.1" # override all applications' image tag/version
    pullPolicy: Always # override all applications' image pullPolicy
  middlewareService:
    type: NodePort
  application:
    type: middleware
  affinity: {}
  nodeAffinityPreset:
    type: hard
    key: uavcloud.env
    values:
      - demo
  volume:
    name: glusterfs-volume
  storageClass:
    name: heketi-glusterfs-distribute


emqx:
  enabled:
    clusterMode: true
    standaloneMode: false
  nodePort:
    mqtt: 31883
    dashboard: 38085
    mqttWebSocket: 38083
  ingress:
    enabled: false
    domain: emqx.ig-dev.uavcmlc.com
    tls:
      secretName: x.ig-dev.uavcmlc.com-tls
  auth:
    username: cmlc
    password: odD8#Ve7.B


mongo:
  enabled: true
  nodePort:
    mongo: 37017
  auth:
    username: cmlc
    password: REdPza8#oVlt


nacos:
  enabled: true
  #name: helm-nacos
  nodePort:
    dashboard: 38848
  ingress:
    enabled: false # please keep false in most cases.
    domain: nacos.dashboard.uavcloud-dev.io
  database:
    # needs to be modified according to the real mysql-database configuration !!
    host: helm-mysql
    port: 3306
    username: k8s_admin
    password: EWde2cKP9w.G
    db_name: nacos_config

rabbitmq:
  enabled: true
  #name: helm-rabbitmq
  nodePort:
    rabbitmq: 35672
    dashboard: 35675
  ingress:
    enabled: false
    domain: rabbitmq.dashboard.uavcloud-dev.io
  auth:
    username: admin
    password: nYcRN91r._hj


redis:
  enabled:
    masterSlaveMode: true
    standaloneMode: false
  nodePort:
    redisMaster: 36379
    redisSlave: 36380
  auth: Mcache@4522

minio:
  enabled: true
  nodePort:
    dashboard: 36677
  auth:
    username: cmii
    password: B#923fC7mk

mysql:
  enabled:
    masterSlaveMode: false
    standaloneMode: true
  nodePort:
    mysqlMaster: 33306
    mysqlSlave: 33307
  persist:
    type: localPath # choose between localPath or pvc
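Any of these defaults can be overridden per environment at install time instead of editing the file. A sketch, with release name, namespace, and chart path as placeholders:

#!/usr/bin/env bash
helm upgrade --install helm-middleware ./all-middleware \
  --namespace middleware --create-namespace \
  --set global.image.tag="3.2.1" \
  --set mongo.enabled=true \
  --set mysql.enabled.standaloneMode=true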
@@ -0,0 +1,25 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
ci/Jenkinsfile
ci/values_override_template.yaml
@@ -0,0 +1,30 @@
apiVersion: v2
name: all-persistence-volume-claims
description: Persistent Volume Claim generator which generates all pvcs for backend applications

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 2.2.2

keywords:
  - uavcloud
  - pvc
  - all-persistence-volume-claims
  - function
  - chinamobile
@@ -0,0 +1,12 @@

{{- if .Values.enabled }}
[INFO] Persistent Volume Claims creation selected!

{{- range $key, $value := .Values.pvc.name_and_capacity }}
{{- $applicationName := $key | trunc 63 }}
{{- $capacity := $value | trunc 63 | quote }}

persistent volume claim with name: {{ $applicationName }} and capacity: {{ $capacity }} has been created!

{{- end }}
{{- end }}
Some files were not shown because too many files have changed in this diff