Add Cursor configuration

Author: zeaslity
Date:   2025-02-25 17:01:14 +08:00
Parent: 5a8aa53d64
Commit: 60a1849207

70 changed files with 60241 additions and 17631 deletions


@@ -0,0 +1,60 @@
---
description: Rules providing the project-specific context for agent-wdd
globs: *,go
---
# You are a master Go programmer, deeply familiar with the github.com/spf13/cobra framework and able to build very modern CLI tools
# The overall architecture of the project is as follows (a minimal cobra sketch follows the list)
1. base: basic server operations; the related functionality lives in @Base.go
    1. docker: Docker-related operations
        1. online: install a specific Docker version from the network
        2. remove: uninstall Docker
        3. local: install Docker from local Docker binaries
    2. dockercompose: docker-compose-related operations
        1. online: install a specific docker-compose version from the network
        2. remove: uninstall docker-compose
        3. local: install docker-compose from local files
    3. tools: install common software from the network using the host's yum/apt
    4. ssh: SSH-related operations
        1. key: install a specific ssh-key
        2. port: change the sshd port to a specific port
        3. config: change the sshd configuration to a specific configuration
    5. swap: disable the host's swap
    6. selinux: disable the host's SELinux
    7. firewall: disable the host's firewall settings
    8. sysconfig: modify the host's sysconfig settings
2. zsh: zsh-related content
    1. cn: flag; if present, install zsh from mirrors inside China
3. proxy: host proxy related content
    1. xray: xray-related content
        1. install: install the latest version of xray
        2. local: install xray from the local machine
        3. upgrade: upgrade xray
    2. vmess: set up the vmess proxy mode in one step
    3. vless: set up the vless proxy mode in one step
    4. sysconfig: adjust the host's proxy-related kernel parameters
4. acme: acme-related content
    1. install: install acme.sh
    2. cert: request a certificate for a specific domain
    3. list: list the certificates present locally
5. wdd
    1. host: update all hosts entries
    2. resolve: update the host's resolver configuration
    3. agent: this part is the actual octopus-agent content
        1. install
        2. upgrade
        3. remove
    4. upgrade: update octopus-wdd itself
6. security
    1. ssh
7. info: collect host-related information and save it to the config file; implemented in [Info.go](mdc:agent-wdd/cmd/Info.go)
    1. cpu: CPU information [CPU.go](mdc:agent-wdd/config/CPU.go)
    2. os: operating-system information [OS.go](mdc:agent-wdd/config/OS.go)
    3. mem: memory information [Memory.go](mdc:agent-wdd/config/Memory.go)
    4. disk: disk information [Disk.go](mdc:agent-wdd/config/Disk.go)
    5. network: network information [Network.go](mdc:agent-wdd/config/Network.go)
    6. all: all information about the host
8. version: print the octopus-agent build version information
9. config: the configuration file used by octopus-wdd
    1. show
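
A minimal cobra skeleton consistent with this layout might look like the sketch below. It is hypothetical: the actual agent-wdd sources are not part of this diff, and the names shown are illustrations only.

```go
// Hypothetical skeleton: how the base/docker/online hierarchy above
// could map onto cobra commands.
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "octopus-wdd", Short: "server operations CLI"}

	base := &cobra.Command{Use: "base", Short: "basic server operations"}
	docker := &cobra.Command{Use: "docker", Short: "docker related operations"}

	var version string
	online := &cobra.Command{
		Use:   "online",
		Short: "install a specific docker version from the network",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("installing docker", version)
			return nil
		},
	}
	online.Flags().StringVar(&version, "version", "latest", "docker version to install")

	// Wire the tree: octopus-wdd base docker online --version 24.0.7
	docker.AddCommand(online)
	base.AddCommand(docker)
	root.AddCommand(base)

	if err := root.Execute(); err != nil {
		fmt.Println(err)
	}
}
```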


@@ -1,5 +1,92 @@
package real_project
var Cmii620ArmImageList = []string{
"harbor.cdcyy.com.cn/cmii/cmii-uav-integration:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-sync:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-open-gateway:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mission:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-notice:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-user:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-bridge:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-brain:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-tower:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-iot-dispatcher:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-developer:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-device:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-advanced5g:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-process:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-sky-converge:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-admin-user:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-fwdd:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-app-release:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cms:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-logger:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uavms-security-center:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uavms-platform-security-center:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-hljtt:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-pilot2-to-cloud:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qingdao:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-dispatchh5:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uavms-platform-manager:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
"harbor.cdcyy.com.cn/cmii/srs:v5.0.195",
"harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL",
}
var Cmii620ImageList = []string{
"harbor.cdcyy.com.cn/cmii/cmii-open-gateway:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:6.2.0-demo",

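A small hypothetical helper (not part of the commit) for splitting references like the entries above into registry, repository, and tag:

```go
// Hypothetical sketch: parse an image reference of the form
// registry/repo/name:tag, as used throughout the lists above.
package main

import (
	"fmt"
	"strings"
)

// splitRef assumes the reference starts with a registry host segment.
func splitRef(ref string) (registry, repo, tag string) {
	slash := strings.Index(ref, "/")
	if slash < 0 {
		return "", ref, "latest" // no registry segment present
	}
	registry = ref[:slash]
	rest := ref[slash+1:]
	if i := strings.LastIndex(rest, ":"); i >= 0 {
		return registry, rest[:i], rest[i+1:]
	}
	return registry, rest, "latest"
}

func main() {
	reg, repo, tag := splitRef("harbor.cdcyy.com.cn/cmii/cmii-uav-brain:6.2.0-szgz-arm")
	fmt.Println(reg, repo, tag) // harbor.cdcyy.com.cn cmii/cmii-uav-brain 6.2.0-szgz-arm
}
```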

@@ -0,0 +1,110 @@
MobaXterm 12.4
(SSH client, X-server and networking tools)

SSH session to root@192.168.35.71
  - SSH compression : ?
  - SSH-browser     : ?
  - X11-forwarding  : ? (disabled or not supported by server)
  - DISPLAY         : 10.250.0.14:0.0

For more info, ctrl+click on help or visit our website

Authorized users only. All activity may be monitored and reported!
Last login: Tue Dec 17 14:26:14 2024 from 192.168.103.36
root@onetools-2 ~ » ls
1.py                     all-gzip-image-list.txt          go     logs                 node-p.tar.gz  port_linux_amd64   wdd
2.sh                     Clash.Verge_2.0.1_x64-setup.exe  image  nethogs              nohup.out      Postman-win64.exe
all-cmii-image-list.txt  cmii_third                       k0s    node-p-0.8.7.tar.gz  octopus_image  test.sh
root@onetools-2 ~ » cd image
root@onetools-2 ~/image » ls
2.sh
'cmii-live-operator=v5.7.0=2024-12-11=206.tar.gz'
'cmii-suav-supervision=6.1.1=2024-12-10=418.tar.gz'
'cmii-uav-advanced5g=6.1.1=2024-12-11=915.tar.gz'
'cmii-uav-bridge=5.7.0-xzga-121001=2024-12-10=210.tar.gz'
'cmii-uav-bridge=5.7.0-xzga-1210=2024-12-10=522.tar.gz'
'cmii-uav-cloud-live=5.7.0-szga=2024-12-13=816.tar.gz'
'cmii-uav-device=5.6.0-szga-1212-arm=2024-12-12=354.tar.gz'
'cmii-uav-device=5.6.0-szga-1216-arm=2024-12-16=178.tar.gz'
'cmii-uav-device=6.1.0-szga-1210=2024-12-10=908.tar.gz'
'cmii-uav-integration=6.1.0-xzga-1211=2024-12-11=277.tar.gz'
'cmii-uav-integration=6.1.0-xzga-1212=2024-12-12=223.tar.gz'
'cmii-uav-integration=6.1.0-xzga-1212=2024-12-12=770.tar.gz'
'cmii-uav-mission=5.4.0-zyga-1216=2024-12-16=288.tar.gz'
'cmii-uav-mission=6.1.1-xzga=2024-12-11=578.tar.gz'
'cmii-uav-mqtthandler=6.1.0-1217-shbj-arm=2024-12-17=514.tar.gz'
'cmii-uav-oauth=6.0.0-33992-120601=2024-12-13=980.tar.gz'
'cmii-uav-platform=5.4.0-27971-zyga-1217=2024-12-17=845.tar.gz'
'cmii-uav-platform=5.7.0-32124-121101-arm=2024-12-11=423.tar.gz'
'cmii-uav-platform=5.7.0-32124-121201-arm=2024-12-12=915.tar.gz'
'cmii-uav-platform=5.7.0-32124-121301-arm=2024-12-13=302.tar.gz'
'cmii-uav-platform=5.7.0-32124-121601-arm=2024-12-16=674.tar.gz'
'cmii-uav-platform=5.7.0-32124-121602-arm=2024-12-16=611.tar.gz'
'cmii-uav-platform=5.7.0-32124-121701-arm=2024-12-17=867.tar.gz'
'cmii-uav-platform=6.0.0-33992-121301=2024-12-13=435.tar.gz'
'cmii-uav-platform=6.1.0-32124-shbj-1217-arm=2024-12-17=736.tar.gz'
'cmii-uav-platform=6.1.0-33579-121110=2024-12-11=944.tar.gz'
'cmii-uav-platform=6.1.0-33579-1211=2024-12-11=358.tar.gz'
'cmii-uav-platform=6.1.1-33579=2024-12-10=863.tar.gz'
'cmii-uav-platform=6.1.1-33579=2024-12-10=866.tar.gz'
'cmii-uav-platform-share=6.1.1=2024-12-10=429.tar.gz'
'cmii-uav-surveillance=5.6.0-szga-1211-arm=2024-12-11=866.tar.gz'
'cmii-uav-surveillance=5.6.0-szga-1217-arm=2024-12-17=394.tar.gz'
'cmii-uav-surveillance=5.7.0-xzga-121101=2024-12-11=439.tar.gz'
'cmii-uav-surveillance=5.7.0-xzga-121101=2024-12-11=939.tar.gz'
'cmii-zlm-oss-adaptor=v2.7.3=2024-12-11=473.tar.gz'
'cmii-zlm-oss-adaptor=v2.7.3=2024-12-11=485.tar.gz'
'cmlc-live=v2.7.3=2024-12-11=256.tar.gz'
'cmlc-live=v2.7.3=2024-12-11=354.tar.gz'
download_and_compress.sh
image-clean.sh
image-sync.sh
kubectl-1.30.4-amd64
'nginx=1.27.0=2024-12-11=538.tar.gz'
nohup.out
rke-1.30.4
yaml
root@onetools-2 ~/image » bash image-sync.sh -h 172.26.0.31:8033 -u harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.1.1
[Upload] - image name to process => harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.1.1

[Upload] - starting image download!

Download - image downloaded successfully! => harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.1.1

[Upload] - the image will be compressed as => cmii-admin-data=6.1.1=2024-12-18=140.tar.gz
[Upload] - compression succeeded! cmii-admin-data=6.1.1=2024-12-18=140.tar.gz

[Upload] - starting upload to OSS!
...4-12-18=140.tar.gz: 204.79 MiB / 204.79 MiB  24.61 MiB/s 8s
[Upload] - upload to OSS succeeded => [2024-12-18 09:13:52 CST] 205MiB STANDARD cmii-admin-data=6.1.1=2024-12-18=140.tar.gz

[Upload] - run the following command on the target Master host:

source <(curl -sL https://b2.107421.xyz/image-sync.sh) -d cmii-admin-data=6.1.1=2024-12-18=140.tar.gz

[Update] - to update the microservice's tag in one step, run the following command:

wget https://oss.demo.uavcmlc.com/cmlc-installation/tmp/cmii-admin-data=6.1.1=2024-12-18=140.tar.gz && bash ./cmii-update.sh cmii-admin-data=6.1.1=2024-12-18=140.tar.gz

[Upload] - to run the steps manually, use the commands below; the full target image address is => 172.26.0.31:8033/cmii/cmii-admin-data:6.1.1

wget https://oss.demo.uavcmlc.com/cmlc-installation/tmp/cmii-admin-data=6.1.1=2024-12-18=140.tar.gz && docker load < cmii-admin-data=6.1.1=2024-12-18=140.tar.gz && docker tag harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.1.1 172.26.0.31:8033/cmii/cmii-admin-data:6.1.1 && docker push 172.26.0.31:8033/cmii/cmii-admin-data:6.1.1

root@onetools-2 ~/image »
Network error: Software caused connection abort

Session stopped
- Press <return> to exit tab
- Press R to restart session
- Press S to save terminal output to file
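
The transcript ends with the manual fallback chain (wget, docker load, docker tag, docker push). A rough Go sketch of that retag-and-push flow, hypothetical and not part of the commit, shelling out to the Docker CLI:

```go
// Hypothetical sketch: replays the manual steps the script prints,
// using the Docker CLI via os/exec.
package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

// run executes a command and wraps any failure with its output.
func run(name string, args ...string) error {
	out, err := exec.Command(name, args...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("%s %s: %v\n%s", name, strings.Join(args, " "), err, out)
	}
	return nil
}

func main() {
	src := "harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.1.1" // source image
	registry := "172.26.0.31:8033"                          // target registry

	// Yields 172.26.0.31:8033/cmii/cmii-admin-data:6.1.1, as in the log above.
	dst := registry + strings.TrimPrefix(src, "harbor.cdcyy.com.cn")

	for _, step := range [][]string{
		{"docker", "pull", src},
		{"docker", "tag", src, dst},
		{"docker", "push", dst},
	} {
		if err := run(step[0], step[1:]...); err != nil {
			log.Fatal(err)
		}
	}
	fmt.Println("synced", src, "=>", dst)
}
```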

File diff suppressed because it is too large.


@@ -0,0 +1,602 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-blockchain
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "blockchain",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dikongzhixingh5
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "dikongzhixingh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-renyike
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "renyike",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "hyper",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-scanner
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "scanner",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uavmsmanager
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "uavmsmanager",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-secenter
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "secenter",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: jxyd
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.138.132.240:30800",
ApplicationShortName: "",
AppClientId: "empty"
}
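
The ConfigMaps above differ only in their name, ApplicationShortName, and AppClientId. A hypothetical text/template sketch (not part of the commit) that could generate them:

```go
// Hypothetical generator: stamps out one tenant-prefix ConfigMap per app,
// matching the manifests above.
package main

import (
	"os"
	"text/template"
)

const manifest = `---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-{{.Short}}
  namespace: jxyd
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "36.138.132.240:30800",
      ApplicationShortName: "{{.Short}}",
      AppClientId: "{{.ClientID}}"
    }
`

type app struct {
	Short    string
	ClientID string
}

func main() {
	tpl := template.Must(template.New("cm").Parse(manifest))
	// Two sample entries copied from the manifests above.
	for _, a := range []app{
		{"splice", "APP_zE0M3sTRXrCIJS8Y"},
		{"threedsimulation", "empty"},
	} {
		if err := tpl.Execute(os.Stdout, a); err != nil {
			panic(err)
		}
	}
}
```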


@@ -0,0 +1,309 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 39999
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: kubernetes-dashboard
image: 10.20.1.130:8033/cmii/dashboard:v2.0.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: 10.20.1.130:8033/cmii/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
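
With the manifests above applied, the dashboard is reachable on NodePort 39999 over HTTPS with an auto-generated certificate (see --auto-generate-certificates). A minimal hypothetical smoke test; the node IP is an assumption:

```go
// Hypothetical check: the dashboard's certificate is self-generated,
// so the client has to skip TLS verification.
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"net/http"
)

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	// Any node IP works; 39999 is the nodePort from the Service above.
	resp, err := client.Get("https://10.20.1.130:39999/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("dashboard answered with status", resp.Status)
}
```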


@@ -0,0 +1,276 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: jxyd
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: jxyd
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
data:
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
EMQX_CLUSTER__K8S__namespace: "jxyd"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: jxyd
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
data:
emqx_auth_mnesia.conf: |-
auth.mnesia.password_hash = sha256
# clientid authentication entries
auth.client.1.clientid = admin
auth.client.1.password = odD8#Ve7.B
auth.client.2.clientid = cmlc
auth.client.2.password = odD8#Ve7.B
## username authentication entries
auth.user.1.username = admin
auth.user.1.password = odD8#Ve7.B
auth.user.2.username = cmlc
auth.user.2.password = odD8#Ve7.B
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
{emqx_auth_mnesia,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: jxyd
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
affinity: {}
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: 10.20.1.130:8033/cmii/emqx:5.5.1
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: {}
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf"
subPath: emqx_auth_mnesia.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_mnesia.conf
path: emqx_auth_mnesia.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: jxyd
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: jxyd
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: jxyd
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: jxyd
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: jxyd
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
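
The cluster above exposes MQTT on NodePort 31883, with credentials defined in helm-emqxs-cm. A minimal hypothetical connectivity check using the community Paho client (github.com/eclipse/paho.mqtt.golang); the node IP is an assumption:

```go
// Hypothetical connectivity check against the helm-emqxs NodePort.
package main

import (
	"fmt"
	"log"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://10.20.1.130:31883"). // any node IP + the mqtt nodePort
		SetClientID("cmlc").                  // listed in emqx_auth_mnesia.conf
		SetUsername("cmlc").
		SetPassword("odD8#Ve7.B") // from the ConfigMap; rotate in production

	client := mqtt.NewClient(opts)
	token := client.Connect()
	if !token.WaitTimeout(5 * time.Second) {
		log.Fatal("connect timed out")
	}
	if err := token.Error(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected to helm-emqxs")
	client.Disconnect(250)
}
```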

File diff suppressed because it is too large.


@@ -0,0 +1,746 @@
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: jxyd
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/supervisionh5)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/blockchain)$ $1/ redirect;
rewrite ^(/classification)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/dikongzhixingh5)$ $1/ redirect;
rewrite ^(/dispatchh5)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/eventsh5)$ $1/ redirect;
rewrite ^(/hljtt)$ $1/ redirect;
rewrite ^(/hyper)$ $1/ redirect;
rewrite ^(/jiangsuwenlv)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/mianyangbackend)$ $1/ redirect;
rewrite ^(/multiterminal)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/pilot2cloud)$ $1/ redirect;
rewrite ^(/qingdao)$ $1/ redirect;
rewrite ^(/qinghaitourism)$ $1/ redirect;
rewrite ^(/renyike)$ $1/ redirect;
rewrite ^(/scanner)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/securityh5)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/smauth)$ $1/ redirect;
rewrite ^(/smsecret)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/threedsimulation)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/uas)$ $1/ redirect;
rewrite ^(/uasms)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
rewrite ^(/uavmsmanager)$ $1/ redirect;
rewrite ^(/secenter)$ $1/ redirect;
spec:
rules:
- host: fake-domain.jxyd.io
http:
paths:
- path: /?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /armypeople/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /blockchain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-blockchain
servicePort: 9528
- path: /classification/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-classification
servicePort: 9528
- path: /cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /dikongzhixingh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dikongzhixingh5
servicePort: 9528
- path: /dispatchh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dispatchh5
servicePort: 9528
- path: /emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /eventsh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-eventsh5
servicePort: 9528
- path: /hljtt/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hljtt
servicePort: 9528
- path: /hyper/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hyperspectral
servicePort: 9528
- path: /jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /mianyangbackend/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mianyangbackend
servicePort: 9528
- path: /multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-multiterminal
servicePort: 9528
- path: /mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /pilot2cloud/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-pilot2-to-cloud
servicePort: 9528
- path: /qingdao/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qingdao
servicePort: 9528
- path: /qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /renyike/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-renyike
servicePort: 9528
- path: /scanner/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-scanner
servicePort: 9528
- path: /security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-security
servicePort: 9528
- path: /securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /smauth/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smauth
servicePort: 9528
- path: /smsecret/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smsecret
servicePort: 9528
- path: /splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-threedsimulation
servicePort: 9528
- path: /traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-traffic
servicePort: 9528
- path: /uas/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uas
servicePort: 9528
- path: /uasms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uasms
servicePort: 9528
- path: /visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
- path: /uavmsmanager/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-manager
servicePort: 9528
- path: /secenter/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-security-center
servicePort: 9528
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: jxyd
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: cmii-admin-data.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-data
servicePort: 8080
- host: cmii-admin-gateway.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- host: cmii-admin-user.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-user
servicePort: 8080
- host: cmii-app-release.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-app-release
servicePort: 8080
- host: cmii-open-gateway.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- host: cmii-sky-converge.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-sky-converge
servicePort: 8080
- host: cmii-suav-supervision.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-supervision
servicePort: 8080
- host: cmii-uas-gateway.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- host: cmii-uas-lifecycle.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-lifecycle
servicePort: 8080
- host: cmii-uav-advanced5g.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-advanced5g
servicePort: 8080
- host: cmii-uav-airspace.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-airspace
servicePort: 8080
- host: cmii-uav-alarm.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-alarm
servicePort: 8080
- host: cmii-uav-autowaypoint.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-autowaypoint
servicePort: 8080
- host: cmii-uav-brain.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-brain
servicePort: 8080
- host: cmii-uav-bridge.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-bridge
servicePort: 8080
- host: cmii-uav-cloud-live.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cloud-live
servicePort: 8080
- host: cmii-uav-clusters.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-clusters
servicePort: 8080
- host: cmii-uav-cms.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cms
servicePort: 8080
- host: cmii-uav-data-post-process.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-data-post-process
servicePort: 8080
- host: cmii-uav-depotautoreturn.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-depotautoreturn
servicePort: 8080
- host: cmii-uav-developer.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-developer
servicePort: 8080
- host: cmii-uav-device.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-device
servicePort: 8080
- host: cmii-uav-emergency.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-emergency
servicePort: 8080
- host: cmii-uav-fwdd.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-fwdd
servicePort: 8080
- host: cmii-uav-gateway.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- host: cmii-uav-gis-server.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gis-server
servicePort: 8080
- host: cmii-uav-grid-datasource.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-datasource
servicePort: 8080
- host: cmii-uav-grid-engine.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-engine
servicePort: 8080
- host: cmii-uav-grid-manage.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-manage
servicePort: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-industrial-portfolio
servicePort: 8080
- host: cmii-uav-integration.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-integration
servicePort: 8080
- host: cmii-uav-iot-dispatcher.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-iot-dispatcher
servicePort: 8080
- host: cmii-uav-kpi-monitor.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-kpi-monitor
servicePort: 8080
- host: cmii-uav-logger.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-logger
servicePort: 8080
- host: cmii-uav-material-warehouse.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-material-warehouse
servicePort: 8080
- host: cmii-uav-mission.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mission
servicePort: 8080
- host: cmii-uav-mqtthandler.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mqtthandler
servicePort: 8080
- host: cmii-uav-multilink.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-multilink
servicePort: 8080
- host: cmii-uav-notice.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-notice
servicePort: 8080
- host: cmii-uav-oauth.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-oauth
servicePort: 8080
- host: cmii-uav-process.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-sec-awareness.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sec-awareness
servicePort: 8080
- host: cmii-uav-security-trace.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-security-trace
servicePort: 8080
- host: cmii-uav-sense-adapter.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sense-adapter
servicePort: 8080
- host: cmii-uav-surveillance.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-surveillance
servicePort: 8080
- host: cmii-uav-sync.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sync
servicePort: 8080
- host: cmii-uav-threedsimulation.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-threedsimulation
servicePort: 8080
- host: cmii-uav-tower.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tower
servicePort: 8080
- host: cmii-uav-user.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-user
servicePort: 8080
- host: cmii-uav-waypoint.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-waypoint
servicePort: 8080
- host: cmii-uavms-security-center.uavcloud-jxyd.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-security-center
servicePort: 8080
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: jxyd
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header upgradePrefix $http_upgrade;
proxy_set_header Connection "upgradePrefix";
spec:
rules:
- host: fake-domain.jxyd.io
http:
paths:
- path: /oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
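# Note: with rewrite-target /$1, the "?(.*)" capture group strips the routing
# prefix before proxying; an illustrative request:
#   http://fake-domain.jxyd.io/api/v1/ping  ->  cmii-uav-gateway:8080/v1/ping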

@@ -0,0 +1,78 @@
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: jxyd
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
nodePort: 37017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: jxyd
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: helm-mongo
image: 10.20.1.130:8033/cmii/mongo:5.0
resources: {}
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
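# Connectivity sketch for the NodePort above (<node-ip> is a placeholder; the
# mongo:5.0 image ships mongosh):
#   mongosh --host <node-ip> --port 37017 -u cmlc -p 'REdPza8#oVlt'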

@@ -0,0 +1,410 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: jxyd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
annotations: {}
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: jxyd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: jxyd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
    innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: jxyd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all on *.* to zyly_qc@'%';
create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
grant all on *.* to k8s_admin@'%';
create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all on *.* to audit_dba@'%';
create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
create user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION CLIENT on *.* to monitor@'%';
flush privileges;
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: jxyd
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: jxyd
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: jxyd
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: jxyd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: jxyd
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: jxyd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: jxyd
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: jxyd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: jxyd
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: {}
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: 10.20.1.130:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: 10.20.1.130:8033/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: {}
requests: {}
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/jxyd/
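# Note: mysql-data is a hostPath volume, so the data lives on whichever node
# runs the pod; label exactly one node to satisfy the mysql-deploy
# nodeSelector above, e.g.:
#   kubectl label node <node-name> mysql-deploy=true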

@@ -0,0 +1,130 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: jxyd
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: jxyd
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38848
- port: 9848
name: server12
targetPort: 9848
- port: 9849
name: server23
targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: jxyd
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 6.2.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: nacos-server
image: 10.20.1.130:8033/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
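# Note: Nacos runs standalone against the external MySQL above; the
# cmii_nacos_config database typically has to exist and be initialized with
# the Nacos MySQL schema before the pod starts, e.g.:
#   CREATE DATABASE cmii_nacos_config DEFAULT CHARACTER SET utf8mb4;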

@@ -0,0 +1,38 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: test-pod
image: 10.20.1.130:8033/cmii/busybox:latest
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
        claimName: test-claim # must match the PVC name above
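# If dynamic provisioning works, this pod runs to completion and the marker
# file appears under the export used by the provisioner below:
#   kubectl get pvc test-claim                         # STATUS should be Bound
#   ls /var/lib/docker/nfs_data/*/NFS-CREATE-SUCCESS   # on the NFS server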

@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # set to the namespace for your environment; the same applies below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME environment variable in the provisioner Deployment
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # must match the namespace used in the RBAC manifests above
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 10.20.1.130:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 10.20.1.132
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 10.20.1.132
path: /var/lib/docker/nfs_data
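# Assumes the export is mountable from every node; a quick check from a node
# (requires the NFS client utilities):
#   showmount -e 10.20.1.132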

@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: jxyd
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: jxyd
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: jxyd
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: jxyd
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
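# Note: nfs-subdir-external-provisioner does not enforce requested capacity;
# the storage sizes above are declarative only.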

@@ -0,0 +1,328 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
targetPort: stats
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: jxyd
publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 36675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: jxyd
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: jxyd
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: jxyd
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: {}
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: 10.20.1.130:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: {}
requests: {}
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: 10.20.1.130:8033/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: {}
requests: {}
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq
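# The management UI is reachable at http://<node-ip>:36675 (admin /
# nYcRN91r._hj, per the Secret and rabbitmq.conf above).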

@@ -0,0 +1,585 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: jxyd
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: jxyd
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: jxyd
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: jxyd
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: {}
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 10.20.1.130:8033/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: tmp
emptyDir: {}
- name: redis-data
emptyDir: {}
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: jxyd
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: jxyd
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: jxyd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 10.20.1.130:8033/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.jxyd.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: redis-data
emptyDir: {}
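# Note: redis-data is an emptyDir on both master and replica, so data survives
# container restarts but not pod rescheduling; AOF (appendonly yes) therefore
# gives only node-local durability here.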

@@ -0,0 +1,496 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: jxyd
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 31935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://36.138.132.240:30800;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
nodePort: 31935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30080
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: 10.20.1.130:8033/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 31935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 36.138.132.240
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: jxyd/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: jxyd/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: 10.20.1.130:8033/cmii/cmii-srs-oss-adaptor:2023-SA
env:
- name: OSS_ENDPOINT
value: 'http://10.20.1.132:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: jxyd/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: 10.20.1.130:8033/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 6.2.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: nacos
password: KingKong@95461234
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 6.2.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 6.2.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://36.138.132.240:31935'
rtsp: 'rtsp://36.138.132.240:30554'
srt: 'srt://36.138.132.240:30556'
flv: 'http://36.138.132.240:30500'
hls: 'http://36.138.132.240:30500'
rtc: 'webrtc://36.138.132.240:30080'
replay: 'https://36.138.132.240:30333'
minio:
endpoint: http://10.20.1.132:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls
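# Note: CANDIDATE (36.138.132.240) must be an address WebRTC clients can
# actually reach; SRS advertises it in ICE candidates, so an unreachable value
# breaks webrtc:// playback even when RTMP works.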

Binary file not shown.

Binary file not shown.

File diff suppressed because it is too large.
@@ -0,0 +1,602 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-secenter
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "secenter",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-scanner
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "scanner",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-renyike
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "renyike",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-blockchain
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "blockchain",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uavmsmanager
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "uavmsmanager",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "hyper",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dikongzhixingh5
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "dikongzhixingh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
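
All of these tenant-prefix ConfigMaps are rendered from the same template and differ only in ApplicationShortName and AppClientId; the frontend pods presumably mount the rendered ingress-config.js over the static copy baked into their images. A quick way to inspect one rendered file (a sketch; tenant-prefix-oms is just one of the names defined above):
kubectl -n szgz get configmap tenant-prefix-oms -o jsonpath='{.data.ingress-config\.js}'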


@@ -0,0 +1,309 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 39999
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: kubernetes-dashboard
image: 172.31.2.7:8033/admin/cmii/dashboard:v2.0.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: 172.31.2.7:8033/admin/cmii/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
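
The admin-user ServiceAccount bound to cluster-admin above is the account used to log into the dashboard, which is exposed over HTTPS on NodePort 39999. A minimal sketch for fetching its bearer token, assuming a cluster old enough to auto-create ServiceAccount token Secrets; on Kubernetes 1.24+ use the second form:
kubectl -n kube-system get secret $(kubectl -n kube-system get sa admin-user -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 -d
kubectl -n kube-system create token admin-user   # Kubernetes >= 1.24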


@@ -0,0 +1,276 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: szgz
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: szgz
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
data:
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
EMQX_CLUSTER__K8S__NAMESPACE: "szgz"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: szgz
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
data:
emqx_auth_mnesia.conf: |-
auth.mnesia.password_hash = sha256
# clientid authentication entries
auth.client.1.clientid = admin
auth.client.1.password = odD8#Ve7.B
auth.client.2.clientid = cmlc
auth.client.2.password = odD8#Ve7.B
## username authentication entries
auth.user.1.username = admin
auth.user.1.password = odD8#Ve7.B
auth.user.2.username = cmlc
auth.user.2.password = odD8#Ve7.B
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
{emqx_auth_mnesia,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: szgz
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
affinity: {}
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: 172.31.2.7:8033/admin/cmii/emqx:5.5.1
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: {}
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf"
subPath: emqx_auth_mnesia.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_mnesia.conf
path: emqx_auth_mnesia.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: szgz
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: szgz
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: szgz
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: szgz
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: szgz
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
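
A minimal connectivity check against the MQTT NodePort, using one of the mnesia-auth accounts defined in helm-emqxs-cm (a sketch; assumes mosquitto-clients is installed and <node-ip> is any cluster node):
mosquitto_pub -h <node-ip> -p 31883 -u admin -P 'odD8#Ve7.B' -t 'admin/test' -m 'hello'
The EMQX dashboard itself is reachable on NodePort 38085.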

File diff suppressed because it is too large


@@ -0,0 +1,730 @@
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: szgz
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/supervisionh5)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/blockchain)$ $1/ redirect;
rewrite ^(/classification)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/dikongzhixingh5)$ $1/ redirect;
rewrite ^(/dispatchh5)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/eventsh5)$ $1/ redirect;
rewrite ^(/hljtt)$ $1/ redirect;
rewrite ^(/hyper)$ $1/ redirect;
rewrite ^(/jiangsuwenlv)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/mianyangbackend)$ $1/ redirect;
rewrite ^(/multiterminal)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/pilot2cloud)$ $1/ redirect;
rewrite ^(/qingdao)$ $1/ redirect;
rewrite ^(/qinghaitourism)$ $1/ redirect;
rewrite ^(/renyike)$ $1/ redirect;
rewrite ^(/scanner)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/securityh5)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/smauth)$ $1/ redirect;
rewrite ^(/smsecret)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/threedsimulation)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/uas)$ $1/ redirect;
rewrite ^(/uasms)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
rewrite ^(/uavmsmanager)$ $1/ redirect;
rewrite ^(/secenter)$ $1/ redirect;
spec:
rules:
- host: fake-domain.szgz.io
http:
paths:
- path: /?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /armypeople/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /blockchain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-blockchain
servicePort: 9528
- path: /classification/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-classification
servicePort: 9528
- path: /cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /dikongzhixingh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dikongzhixingh5
servicePort: 9528
- path: /dispatchh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dispatchh5
servicePort: 9528
- path: /emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /eventsh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-eventsh5
servicePort: 9528
- path: /hljtt/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hljtt
servicePort: 9528
- path: /hyper/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hyperspectral
servicePort: 9528
- path: /jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /mianyangbackend/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mianyangbackend
servicePort: 9528
- path: /multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-multiterminal
servicePort: 9528
- path: /mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /pilot2cloud/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-pilot2-to-cloud
servicePort: 9528
- path: /qingdao/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qingdao
servicePort: 9528
- path: /qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /renyike/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-renyike
servicePort: 9528
- path: /scanner/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-scanner
servicePort: 9528
- path: /security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-security
servicePort: 9528
- path: /securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /smauth/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smauth
servicePort: 9528
- path: /smsecret/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smsecret
servicePort: 9528
- path: /splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-threedsimulation
servicePort: 9528
- path: /traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-traffic
servicePort: 9528
- path: /uas/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uas
servicePort: 9528
- path: /uasms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uasms
servicePort: 9528
- path: /visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
- path: /uavmsmanager/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-manager
servicePort: 9528
- path: /secenter/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-security-center
servicePort: 9528
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: szgz
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: cmii-admin-data.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-data
servicePort: 8080
- host: cmii-admin-gateway.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- host: cmii-admin-user.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-user
servicePort: 8080
- host: cmii-app-release.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-app-release
servicePort: 8080
- host: cmii-open-gateway.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- host: cmii-sky-converge.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-sky-converge
servicePort: 8080
- host: cmii-suav-supervision.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-supervision
servicePort: 8080
- host: cmii-uas-gateway.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- host: cmii-uas-lifecycle.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-lifecycle
servicePort: 8080
- host: cmii-uav-advanced5g.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-advanced5g
servicePort: 8080
- host: cmii-uav-airspace.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-airspace
servicePort: 8080
- host: cmii-uav-alarm.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-alarm
servicePort: 8080
- host: cmii-uav-autowaypoint.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-autowaypoint
servicePort: 8080
- host: cmii-uav-brain.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-brain
servicePort: 8080
- host: cmii-uav-bridge.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-bridge
servicePort: 8080
- host: cmii-uav-cloud-live.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cloud-live
servicePort: 8080
- host: cmii-uav-clusters.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-clusters
servicePort: 8080
- host: cmii-uav-cms.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cms
servicePort: 8080
- host: cmii-uav-data-post-process.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-data-post-process
servicePort: 8080
- host: cmii-uav-depotautoreturn.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-depotautoreturn
servicePort: 8080
- host: cmii-uav-developer.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-developer
servicePort: 8080
- host: cmii-uav-device.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-device
servicePort: 8080
- host: cmii-uav-emergency.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-emergency
servicePort: 8080
- host: cmii-uav-fwdd.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-fwdd
servicePort: 8080
- host: cmii-uav-gateway.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- host: cmii-uav-gis-server.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gis-server
servicePort: 8080
- host: cmii-uav-grid-datasource.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-datasource
servicePort: 8080
- host: cmii-uav-grid-engine.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-engine
servicePort: 8080
- host: cmii-uav-grid-manage.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-manage
servicePort: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-industrial-portfolio
servicePort: 8080
- host: cmii-uav-integration.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-integration
servicePort: 8080
- host: cmii-uav-iot-dispatcher.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-iot-dispatcher
servicePort: 8080
- host: cmii-uav-kpi-monitor.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-kpi-monitor
servicePort: 8080
- host: cmii-uav-logger.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-logger
servicePort: 8080
- host: cmii-uav-material-warehouse.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-material-warehouse
servicePort: 8080
- host: cmii-uav-mission.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mission
servicePort: 8080
- host: cmii-uav-mqtthandler.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mqtthandler
servicePort: 8080
- host: cmii-uav-multilink.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-multilink
servicePort: 8080
- host: cmii-uav-notice.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-notice
servicePort: 8080
- host: cmii-uav-oauth.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-oauth
servicePort: 8080
- host: cmii-uav-process.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-sense-adapter.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sense-adapter
servicePort: 8080
- host: cmii-uav-surveillance.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-surveillance
servicePort: 8080
- host: cmii-uav-sync.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sync
servicePort: 8080
- host: cmii-uav-threedsimulation.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-threedsimulation
servicePort: 8080
- host: cmii-uav-tower.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tower
servicePort: 8080
- host: cmii-uav-user.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-user
servicePort: 8080
- host: cmii-uav-waypoint.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-waypoint
servicePort: 8080
- host: cmii-uavms-security-center.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-security-center
servicePort: 8080
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: szgz
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header upgradePrefix $http_upgrade;
proxy_set_header Connection "upgradePrefix";
spec:
rules:
- host: fake-domain.szgz.io
http:
paths:
- path: /oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
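
Because all three Ingresses set rewrite-target: /$1, only the capture group after each prefix reaches the backend. A sketch of the effect (/health is a hypothetical backend path):
# reaches cmii-uav-gateway:8080 as /health (the /api prefix is stripped)
curl -H 'Host: fake-domain.szgz.io' http://<ingress-node-ip>/api/health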


@@ -0,0 +1,78 @@
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: szgz
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
nodePort: 37017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: szgz
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: helm-mongo
image: 172.31.2.7:8033/admin/cmii/mongo:5.0
resources: {}
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
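A sketch of a connectivity check against the NodePort, using the root credentials from the StatefulSet env above; note the # in the password must be percent-encoded in the URI:
mongosh "mongodb://cmlc:REdPza8%23oVlt@<node-ip>:37017/admin"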


@@ -0,0 +1,410 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
annotations: {}
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
max_allowed_packet=32M
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all on *.* to zyly_qc@'%';
create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
grant all on *.* to k8s_admin@'%';
create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all on *.* to audit_dba@'%';
create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
create user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION CLIENT on *.* to monitor@'%';
flush privileges;
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: szgz
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: szgz
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: szgz
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: szgz
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: szgz
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: szgz
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: {}
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: 172.31.2.7:8033/admin/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: 172.31.2.7:8033/admin/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: {}
requests: {}
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/szgz/
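
A sketch of a login check via the NodePort service, using the k8s_admin account created by the init script above:
mysql -h <node-ip> -P 33306 -u k8s_admin -p'fP#UaH6qQ3)8' cmii -e 'SELECT VERSION();'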


@@ -0,0 +1,130 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: szgz
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: szgz
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38848
- port: 9848
name: server12
targetPort: 9848
- port: 9849
name: server23
targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: szgz
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 6.2.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: nacos-server
image: 172.31.2.7:8033/admin/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
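Nacos runs in standalone mode against the helm-mysql instance; the cmii_nacos_config schema referenced in helm-nacos-cm must already exist there. A readiness sketch against the console NodePort (the health endpoint is assumed from the stock nacos-server image):
curl http://<node-ip>:38848/nacos/v1/console/health/readiness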


@@ -0,0 +1,38 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: test-pod
image: 172.31.2.7:8033/admin/cmii/busybox:latest
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: test-claim # must match the PVC name above
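
To run this smoke test once the provisioner below is deployed (a sketch; nfs-test.yaml is a hypothetical filename for this manifest):
kubectl apply -f nfs-test.yaml
kubectl get pod test-pod   # STATUS should reach Completed
# then, on the NFS server (172.31.2.7):
ls /var/lib/docker/nfs_data/*/NFS-CREATE-SUCCESS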


@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # adjust to your environment; same for the namespaces below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment below
parameters:
archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # must match the namespace in the RBAC objects above
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 172.31.2.7:8033/admin/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 172.31.2.7
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 172.31.2.7
path: /var/lib/docker/nfs_data


@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: szgz
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: szgz
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: szgz
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: szgz
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
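
All four claims bind against the nfs-prod-distribute StorageClass; a quick check that dynamic provisioning worked:
kubectl -n szgz get pvc   # all four should show STATUS Bound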


@@ -0,0 +1,328 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
targetPort: stats
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: szgz
publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 36675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: szgz
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: szgz
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: {}
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: 172.31.2.7:8033/admin/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: {}
requests: {}
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: 172.31.2.7:8033/admin/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: {}
requests: {}
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq
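# Node-name note (a sketch, assuming the manifest above is applied to the szgz
# namespace unchanged): for pod helm-rabbitmq-0 the env expansion in
# RABBITMQ_NODE_NAME yields
#   rabbit@helm-rabbitmq-0.helm-rabbitmq-headless.szgz.svc.cluster.local
# RABBITMQ_USE_LONGNAME=true lets Erlang distribution accept this FQDN-style
# name, and RABBITMQ_MNESIA_DIR keys the on-disk state on the same value, so
# renaming the headless service would invalidate existing mnesia data.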

---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
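# Probe wiring note: the master statefulset below uses the *_local.sh variants
# while the replicas use *_local_and_master.sh, so a replica only reports
# live/ready when both itself and the master answer PONG; the $1 argument each
# script receives is the redis-cli timeout in seconds passed from the probe
# command.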
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
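# How get_port resolves the port (a sketch): Kubernetes injects
# <NAME>_SERVICE_PORT_<PORTNAME> env vars for named service ports, so the
# script uppercases the hostname, swaps dashes for underscores (e.g.
# HELM_REDIS_MASTER_0_SERVICE_PORT_REDIS) and reads that variable via bash
# indirect expansion ${!port_var}; when no such per-pod service variable
# exists, as in this deployment, it falls back to 6379 (REDIS) or
# 26379 (SENTINEL).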
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: szgz
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: szgz
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: szgz
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: szgz
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: {}
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 172.31.2.7:8033/admin/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: tmp
emptyDir: {}
- name: redis-data
emptyDir: {}
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: szgz
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 172.31.2.7:8033/admin/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.szgz.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: redis-data
emptyDir: {}
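# Persistence caveat: redis-data is an emptyDir in both statefulsets, so even
# though redis.conf enables appendonly the AOF lives on node-local scratch
# space and is lost when a pod is deleted or rescheduled. A minimal sketch of
# the alternative, mirroring the rabbitmq manifest above (assumes a PVC named
# helm-redis exists):
#   - name: redis-data
#     persistentVolumeClaim:
#       claimName: helm-redis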

---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: szgz
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 31935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://172.31.2.7:8088;
}
}
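# Note on "candidate $CANDIDATE": SRS substitutes this from the CANDIDATE
# environment variable (set to 172.31.2.7 on the srs-rtc container below) and
# advertises it as the ICE candidate address in WebRTC SDP answers, so it must
# be the node/ingress IP reachable by browsers rather than the pod IP.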
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
nodePort: 31935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30080
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: 172.31.2.7:8033/admin/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 31935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 172.31.2.7
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: szgz/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: szgz/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: 172.31.2.7:8033/admin/cmii/cmii-srs-oss-adaptor:2023-SA
env:
- name: OSS_ENDPOINT
value: 'http://172.31.2.9:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: szgz/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: 172.31.2.7:8033/admin/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 6.2.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: nacos
password: KingKong@95461234
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 6.2.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 6.2.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://172.31.2.7:31935'
rtsp: 'rtsp://172.31.2.7:30554'
srt: 'srt://172.31.2.7:30556'
flv: 'http://172.31.2.7:30500'
hls: 'http://172.31.2.7:30500'
rtc: 'webrtc://172.31.2.7:30080'
replay: 'https://172.31.2.7:30333'
minio:
endpoint: http://172.31.2.9:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls
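# Flow recap (as wired above): SRS calls the operator back on publish via
# http_hooks -> http://helm-live-op-svc-v2:8080/hooks/on_push, the oss-adaptor
# sidecar watches the shared /home/hls emptyDir and uploads segments to the
# live-cluster-hls MinIO bucket, and this bootstrap config points the operator
# at the same bucket plus the SRS API (http://helm-live-srs-svc:1985) for
# stream control.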

---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
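# Pattern note: every tenant-prefix-* ConfigMap below has this same shape -- an
# ingress-config.js presumably mounted and served by the matching frontend so
# the browser bundle can read CloudHOST, ApplicationShortName and AppClientId
# at runtime ("empty" marks apps without a dedicated client id); only those
# two app-specific fields vary between entries.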
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dikongzhixingh5
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "dikongzhixingh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-scanner
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "scanner",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-secenter
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "secenter",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uavmsmanager
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "uavmsmanager",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-renyike
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "renyike",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "hyper",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-blockchain
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "blockchain",
AppClientId: "empty"
}

---
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 39999
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: kubernetes-dashboard
image: 172.31.2.7:8033/cmii/cmii/dashboard:v2.0.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: 172.31.2.7:8033/cmii/cmii/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
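# Login note (version-dependent): the admin-user ServiceAccount above is bound
# to cluster-admin, and its bearer token is what the dashboard exposed on
# NodePort 39999 expects. On clusters that still auto-create ServiceAccount
# token secrets (pre-1.24) it can be read with something like:
#   kubectl -n kube-system describe secret \
#     $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')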

---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: szgz
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: szgz
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
data:
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
  EMQX_CLUSTER__K8S__NAMESPACE: "szgz"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: szgz
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
data:
emqx_auth_mnesia.conf: |-
auth.mnesia.password_hash = sha256
    # clientid authentication data
auth.client.1.clientid = admin
auth.client.1.password = odD8#Ve7.B
auth.client.2.clientid = cmlc
auth.client.2.password = odD8#Ve7.B
    ## username authentication data
auth.user.1.username = admin
auth.user.1.password = odD8#Ve7.B
auth.user.2.username = cmlc
auth.user.2.password = odD8#Ve7.B
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
{emqx_auth_mnesia,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
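# Effective auth behaviour (a sketch, assuming mnesia auth is the only enabled
# authenticator): EMQX_ALLOW_ANONYMOUS=false plus EMQX_ACL_NOMATCH=deny means a
# client must present one of the clientid/username entries above (e.g.
# username cmlc / password odD8#Ve7.B); acl.conf then denies subscribing to
# $SYS/# and the literal # wildcard for ordinary users and falls through to
# {allow, all}. for everything else.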
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: szgz
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
affinity: {}
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: 172.31.2.7:8033/cmii/cmii/emqx:5.5.1
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: {}
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf"
subPath: emqx_auth_mnesia.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_mnesia.conf
path: emqx_auth_mnesia.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: szgz
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: szgz
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: szgz
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: szgz
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: szgz
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
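# Discovery note: this headless service is what EMQX_CLUSTER__K8S__SERVICE_NAME
# and ADDRESS_TYPE=dns in helm-emqxs-env point at -- each broker finds its
# peers through the per-pod DNS records of
# helm-emqxs-headless.szgz.svc.cluster.local, which is why this service must
# stay clusterIP: None even while replicas is 1.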

---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: szgz
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/supervisionh5)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/blockchain)$ $1/ redirect;
rewrite ^(/classification)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/dikongzhixingh5)$ $1/ redirect;
rewrite ^(/dispatchh5)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/eventsh5)$ $1/ redirect;
rewrite ^(/hljtt)$ $1/ redirect;
rewrite ^(/hyper)$ $1/ redirect;
rewrite ^(/jiangsuwenlv)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/mianyangbackend)$ $1/ redirect;
rewrite ^(/multiterminal)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/pilot2cloud)$ $1/ redirect;
rewrite ^(/qingdao)$ $1/ redirect;
rewrite ^(/qinghaitourism)$ $1/ redirect;
rewrite ^(/renyike)$ $1/ redirect;
rewrite ^(/scanner)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/securityh5)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/smauth)$ $1/ redirect;
rewrite ^(/smsecret)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/threedsimulation)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/uas)$ $1/ redirect;
rewrite ^(/uasms)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
rewrite ^(/uavmsmanager)$ $1/ redirect;
rewrite ^(/secenter)$ $1/ redirect;
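# How the prefix stripping works (worked example): a request to
# /supervision/assets/app.js matches the path regex /supervision/?(.*) below,
# $1 captures assets/app.js, and rewrite-target: /$1 forwards /assets/app.js
# to cmii-suav-platform-supervision:9528; the snippet above first redirects the
# bare /supervision to /supervision/ so relative asset URLs in the returned
# index.html resolve under the /supervision/ prefix.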
spec:
rules:
- host: fake-domain.szgz.io
http:
paths:
- path: /?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /armypeople/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /blockchain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-blockchain
servicePort: 9528
- path: /classification/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-classification
servicePort: 9528
- path: /cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /dikongzhixingh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dikongzhixingh5
servicePort: 9528
- path: /dispatchh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dispatchh5
servicePort: 9528
- path: /emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /eventsh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-eventsh5
servicePort: 9528
- path: /hljtt/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hljtt
servicePort: 9528
- path: /hyper/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hyperspectral
servicePort: 9528
- path: /jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /mianyangbackend/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mianyangbackend
servicePort: 9528
- path: /multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-multiterminal
servicePort: 9528
- path: /mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /pilot2cloud/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-pilot2-to-cloud
servicePort: 9528
- path: /qingdao/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qingdao
servicePort: 9528
- path: /qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /renyike/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-renyike
servicePort: 9528
- path: /scanner/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-scanner
servicePort: 9528
- path: /security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-security
servicePort: 9528
- path: /securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /smauth/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smauth
servicePort: 9528
- path: /smsecret/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smsecret
servicePort: 9528
- path: /splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-threedsimulation
servicePort: 9528
- path: /traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-traffic
servicePort: 9528
- path: /uas/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uas
servicePort: 9528
- path: /uasms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uasms
servicePort: 9528
- path: /visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
- path: /uavmsmanager/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-manager
servicePort: 9528
- path: /secenter/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-security-center
servicePort: 9528
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: szgz
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: cmii-admin-data.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-data
servicePort: 8080
- host: cmii-admin-gateway.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- host: cmii-admin-user.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-user
servicePort: 8080
- host: cmii-app-release.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-app-release
servicePort: 8080
- host: cmii-open-gateway.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- host: cmii-sky-converge.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-sky-converge
servicePort: 8080
- host: cmii-suav-supervision.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-supervision
servicePort: 8080
- host: cmii-uas-gateway.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- host: cmii-uas-lifecycle.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-lifecycle
servicePort: 8080
- host: cmii-uav-advanced5g.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-advanced5g
servicePort: 8080
- host: cmii-uav-airspace.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-airspace
servicePort: 8080
- host: cmii-uav-alarm.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-alarm
servicePort: 8080
- host: cmii-uav-autowaypoint.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-autowaypoint
servicePort: 8080
- host: cmii-uav-brain.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-brain
servicePort: 8080
- host: cmii-uav-bridge.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-bridge
servicePort: 8080
- host: cmii-uav-cloud-live.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cloud-live
servicePort: 8080
- host: cmii-uav-clusters.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-clusters
servicePort: 8080
- host: cmii-uav-cms.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cms
servicePort: 8080
- host: cmii-uav-data-post-process.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-data-post-process
servicePort: 8080
- host: cmii-uav-depotautoreturn.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-depotautoreturn
servicePort: 8080
- host: cmii-uav-developer.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-developer
servicePort: 8080
- host: cmii-uav-device.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-device
servicePort: 8080
- host: cmii-uav-emergency.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-emergency
servicePort: 8080
- host: cmii-uav-fwdd.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-fwdd
servicePort: 8080
- host: cmii-uav-gateway.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- host: cmii-uav-gis-server.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gis-server
servicePort: 8080
- host: cmii-uav-grid-datasource.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-datasource
servicePort: 8080
- host: cmii-uav-grid-engine.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-engine
servicePort: 8080
- host: cmii-uav-grid-manage.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-manage
servicePort: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-industrial-portfolio
servicePort: 8080
- host: cmii-uav-integration.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-integration
servicePort: 8080
- host: cmii-uav-iot-dispatcher.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-iot-dispatcher
servicePort: 8080
- host: cmii-uav-kpi-monitor.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-kpi-monitor
servicePort: 8080
- host: cmii-uav-logger.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-logger
servicePort: 8080
- host: cmii-uav-material-warehouse.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-material-warehouse
servicePort: 8080
- host: cmii-uav-mission.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mission
servicePort: 8080
- host: cmii-uav-mqtthandler.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mqtthandler
servicePort: 8080
- host: cmii-uav-multilink.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-multilink
servicePort: 8080
- host: cmii-uav-notice.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-notice
servicePort: 8080
- host: cmii-uav-oauth.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-oauth
servicePort: 8080
- host: cmii-uav-process.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-sense-adapter.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sense-adapter
servicePort: 8080
- host: cmii-uav-surveillance.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-surveillance
servicePort: 8080
- host: cmii-uav-sync.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sync
servicePort: 8080
- host: cmii-uav-threedsimulation.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-threedsimulation
servicePort: 8080
- host: cmii-uav-tower.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tower
servicePort: 8080
- host: cmii-uav-user.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-user
servicePort: 8080
- host: cmii-uav-waypoint.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-waypoint
servicePort: 8080
- host: cmii-uavms-security-center.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-security-center
servicePort: 8080
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: szgz
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection "upgrade";
spec:
rules:
- host: fake-domain.szgz.io
http:
paths:
- path: /oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
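# How the rewrite resolves (illustrative sketch; the request paths below are
# hypothetical): with rewrite-target /$1, the capture group of the matched
# path regex becomes the upstream URI, so
#   GET fake-domain.szgz.io/oms/api/menu/list  -> cmii-admin-gateway:8080/menu/list
#   GET fake-domain.szgz.io/open/api/v1/token  -> cmii-open-gateway:8080/v1/token
#   GET fake-domain.szgz.io/api/v1/device/page -> cmii-uav-gateway:8080/v1/device/page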
@@ -0,0 +1,78 @@
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: szgz
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
nodePort: 37017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: szgz
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: helm-mongo
image: 172.31.2.7:8033/cmii/cmii/mongo:5.0
resources: {}
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
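# Connection sketch (assumptions: mongosh on PATH and a reachable node IP;
# 37017 is the NodePort declared above, and the credentials come from the
# MONGO_INITDB_ROOT_* env vars in the StatefulSet):
#   mongosh "mongodb://<root-user>:<root-password>@<node-ip>:37017/admin"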
@@ -0,0 +1,410 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
annotations: {}
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
    innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all on *.* to zyly_qc@'%';
create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
grant all on *.* to k8s_admin@'%';
create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all on *.* to audit_dba@'%';
create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
create user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION CLIENT on *.* to monitor@'%';
flush privileges;
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: szgz
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: szgz
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: szgz
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: szgz
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: szgz
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: szgz
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: {}
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: 172.31.2.7:8033/cmii/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: 172.31.2.7:8033/cmii/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: {}
requests: {}
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/szgz/
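# Access sketch (assumptions: the mysql client on PATH and a reachable node IP;
# 33306 is the NodePort of the helm-mysql Service above, and the k8s_admin
# account is created by the init-scripts ConfigMap):
#   mysql -h <node-ip> -P 33306 -u k8s_admin -p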
@@ -0,0 +1,130 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: szgz
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: szgz
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38848
- port: 9848
name: server12
targetPort: 9848
- port: 9849
name: server23
targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: szgz
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 6.2.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: nacos-server
image: 172.31.2.7:8033/cmii/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
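# Console sketch (assumption: a reachable node IP; 38848 is the NodePort above):
#   http://<node-ip>:38848/nacos
# NACOS_AUTH_ENABLE=false above disables console authentication.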
@@ -0,0 +1,38 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name of the StorageClass below
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: test-pod
image: 172.31.2.7:8033/cmii/cmii/busybox:latest
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
        claimName: test-claim # must match the PVC name above
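# Verification sketch (assumes kubectl access; the manifest filename is
# hypothetical, and the export path matches the provisioner config below):
#   kubectl apply -f nfs-test.yaml
#   kubectl get pvc test-claim                    # expect STATUS: Bound
#   ls /var/lib/docker/nfs_data/*test-claim*/     # on the NFS server: NFS-CREATE-SUCCESS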
@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # adjust to your environment; the same applies below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment below
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # must match the namespace used in the RBAC objects above
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 172.31.2.7:8033/cmii/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 172.31.2.9
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 172.31.2.9
path: /var/lib/docker/nfs_data
@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: szgz
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: szgz
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: szgz
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: szgz
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
@@ -0,0 +1,328 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
targetPort: stats
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: szgz
publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 36675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: szgz
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: szgz
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: {}
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: 172.31.2.7:8033/cmii/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: {}
requests: {}
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: 172.31.2.7:8033/cmii/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: {}
requests: {}
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq
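# Access sketch (assumption: a reachable node IP): AMQP is exposed on NodePort
# 35672 and the management UI on 36675, using the admin user and the password
# from the helm-rabbitmq Secret above, e.g.
#   http://<node-ip>:36675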
@@ -0,0 +1,585 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
    HEADLESS_SERVICE="helm-redis-headless.szgz.svc.cluster.local" # required by get_full_hostname above; value inferred from the headless Service in this file
    REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: szgz
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: szgz
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: szgz
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: szgz
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: {}
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 172.31.2.7:8033/cmii/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: tmp
emptyDir: {}
- name: redis-data
emptyDir: {}
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: szgz
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 172.31.2.7:8033/cmii/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.szgz.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: redis-data
emptyDir: {}
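# In-cluster access sketch (assumes a pod in the szgz namespace with redis-cli
# installed; helm-redis-master is the ClusterIP Service above):
#   redis-cli -h helm-redis-master -p 6379 -a "$REDIS_PASSWORD" ping   # expect PONG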
@@ -0,0 +1,496 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: szgz
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 31935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://172.31.2.7:8088;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
nodePort: 31935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30080
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: 172.31.2.7:8033/cmii/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 31935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 172.31.2.7
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: szgz/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: szgz/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: 172.31.2.7:8033/cmii/cmii/cmii-srs-oss-adaptor:2023-SA
env:
- name: OSS_ENDPOINT
value: 'http://172.31.2.9:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: szgz/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: 172.31.2.7:8033/cmii/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 6.2.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: nacos
password: KingKong@95461234
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 6.2.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 6.2.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://172.31.2.7:31935'
rtsp: 'rtsp://172.31.2.7:30554'
srt: 'srt://172.31.2.7:30556'
flv: 'http://172.31.2.7:30500'
hls: 'http://172.31.2.7:30500'
rtc: 'webrtc://172.31.2.7:30080'
replay: 'https://172.31.2.7:30333'
minio:
endpoint: http://172.31.2.9:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls
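# Publish sketch (illustrative; the stream name live/test is hypothetical and
# the RTMP endpoint comes from the proto block above):
#   ffmpeg -re -i sample.mp4 -c copy -f flv rtmp://172.31.2.7:31935/live/test
# Playback URLs (flv/hls/rtc) then follow the proto mappings configured above.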
@@ -0,0 +1,103 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-platform-awareness
namespace: uavcloud-demo
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-awareness
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: 6.0.0
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: cmii-uav-platform-awareness
template:
metadata:
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-awareness
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-platform-awareness
image: harbor.cdcyy.com.cn/cmii/cmii-uav-platform-awareness:6.0.0
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: uavcloud-demo
- name: APPLICATION_NAME
value: cmii-uav-platform-awareness
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-awareness
items:
- key: ingress-config.js
path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-awareness
namespace: uavcloud-demo
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-awareness
octopus.control: frontend-app-wdd
app.kubernetes.io/version: 6.0.0
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-awareness
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-awareness
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "awareness",
AppClientId: "empty"
}
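# Note: mounting this ConfigMap over dist/ingress-config.js lets the static
# frontend pick up per-environment values at runtime without a rebuild; every
# tenant-prefix-* ConfigMap below follows the same pattern.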
File diff suppressed because it is too large.
@@ -1,6 +1,34 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: uavcloud-devflight
@@ -30,28 +58,28 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
-  name: tenant-prefix-mws
+  name: tenant-prefix-share
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
-  name: tenant-prefix-dispatchh5
+  name: tenant-prefix-qingdao
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "dispatchh5",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
@@ -72,14 +100,14 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
-  name: tenant-prefix-smsecret
+  name: tenant-prefix-blockchain
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "smsecret",
ApplicationShortName: "blockchain",
AppClientId: "empty"
}
---
@@ -99,6 +127,174 @@ data:
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-awareness
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "awareness",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: uavcloud-devflight
@@ -114,14 +310,14 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
name: tenant-prefix-oms
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "eventsh5",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
@@ -141,48 +337,6 @@ data:
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uavmsmanager
namespace: uavcloud-devflight
@@ -212,43 +366,43 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
name: tenant-prefix-ai-brain
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
name: tenant-prefix-splice
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
name: tenant-prefix-smauth
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
@@ -268,14 +422,28 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
name: tenant-prefix-uasms
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "uas",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
@@ -310,15 +478,43 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
name: tenant-prefix-supervision
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
@@ -338,126 +534,14 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
name: tenant-prefix-eventsh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "pilot2cloud",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
@@ -478,29 +562,29 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-blockchain
name: tenant-prefix-emergency
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "blockchain",
AppClientId: "empty"
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
name: tenant-prefix-seniclive
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
@@ -520,83 +604,13 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
name: tenant-prefix-hljtt
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "cmsportal",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}

File diff suppressed because it is too large

View File

@@ -19,6 +19,7 @@ metadata:
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/awareness)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/blockchain)$ $1/ redirect;
rewrite ^(/classification)$ $1/ redirect;
@@ -92,6 +93,11 @@ spec:
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /devflight/awareness/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-awareness
servicePort: 9528
- path: /devflight/base/?(.*)
pathType: ImplementationSpecific
backend:
@@ -626,6 +632,22 @@ spec:
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-sec-awareness.uavcloud-devflight.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sec-awareness
servicePort: 8080
- host: cmii-uav-security-trace.uavcloud-devflight.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-security-trace
servicePort: 8080
- host: cmii-uav-sense-adapter.uavcloud-devflight.io
http:
paths:
@@ -706,8 +728,8 @@ metadata:
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header upgradePrefix $http_upgrade;
proxy_set_header Connection "upgradePrefix";
spec:
rules:
- host: fake-domain.uavcloud-devflight.io
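The paths in these rules pair nginx.ingress.kubernetes.io/rewrite-target: /$1 with patterns such as /devflight/awareness/?(.*), so the capture group becomes the upstream path and the tenant prefix is stripped before the request reaches the Service. A rough model of that capture in Go's regexp, as a simplification of the NGINX ingress controller's behaviour:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// simplified model of path matching plus rewrite-target: /$1
	re := regexp.MustCompile(`^/devflight/awareness/?(.*)$`)
	for _, p := range []string{"/devflight/awareness", "/devflight/awareness/index.html"} {
		if m := re.FindStringSubmatch(p); m != nil {
			fmt.Printf("%s -> /%s\n", p, m[1])
		}
	}
}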

View File

@@ -1,342 +1,6 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dikongzhixingh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "dikongzhixingh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-blockchain
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "blockchain",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-renyike
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "renyike",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: uavcloud-devflight
@@ -365,6 +29,20 @@ data:
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: uavcloud-devflight
@@ -380,14 +58,196 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
name: tenant-prefix-blockchain
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "dispatchh5",
ApplicationShortName: "blockchain",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dikongzhixingh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "dikongzhixingh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-renyike
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "renyike",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
@@ -422,98 +282,42 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
name: tenant-prefix-media
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
name: tenant-prefix-jiangsuwenlv
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "oms",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
name: tenant-prefix-uasms
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-secenter
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "secenter",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
@@ -534,15 +338,43 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
name: tenant-prefix-secenter
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
ApplicationShortName: "secenter",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
@@ -562,28 +394,56 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
name: tenant-prefix-qinghaitourism
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
name: tenant-prefix-hljtt
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "hyper",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
@@ -600,3 +460,143 @@ data:
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "hyper",
AppClientId: "empty"
}

View File

@@ -8,7 +8,7 @@ metadata:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
app.kubernetes.io/version: 6.0.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
@@ -292,7 +292,7 @@ metadata:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
app.kubernetes.io/version: 6.0.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
@@ -626,6 +626,22 @@ spec:
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-sec-awareness.uavcloud-devflight.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sec-awareness
servicePort: 8080
- host: cmii-uav-security-trace.uavcloud-devflight.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-security-trace
servicePort: 8080
- host: cmii-uav-sense-adapter.uavcloud-devflight.io
http:
paths:
@@ -700,14 +716,14 @@ metadata:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
app.kubernetes.io/version: 6.0.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header upgradePrefix $http_upgrade;
proxy_set_header Connection "upgradePrefix";
spec:
rules:
- host: fake-domain.uavcloud-devflight.io

View File

@@ -458,7 +458,7 @@ data:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 6.2.0
version: 6.0.0
scanPackage: com.cmii.live.op
cloud:
nacos:
@@ -468,11 +468,11 @@ data:
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 6.2.0
group: 6.0.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 6.2.0
group: 6.0.0
refresh: true
discovery:
enabled: false

View File

@@ -50,6 +50,8 @@ var CmiiBackendAppMap = map[string]string{
"cmii-uav-fwdd": "5.7.0",
"cmii-uav-advanced5g": "5.7.0",
"cmii-sky-converge": "5.7.0",
"cmii-uav-sec-awareness": "5.7.0",
"cmii-uav-security-trace": "5.7.0",
}
var CmiiFrontendAppMap = map[string]string{
@@ -85,6 +87,7 @@ var CmiiFrontendAppMap = map[string]string{
"cmii-uavms-platform-security-center": "5.2.0",
"cmii-uav-platform-dispatchh5": "5.2.0",
"cmii-uavms-platform-manager": "5.2.0",
"cmii-uav-platform-awareness": "5.2.0",
}
var IgnoreCmiiBackendAppName = map[string]string{

View File

@@ -44,6 +44,7 @@ var FrontendShortNameMaps = map[string]string{
"cmii-uav-platform-dikongzhixingh5": "dikongzhixingh5",
"cmii-uavms-platform-manager": "uavmsmanager",
"cmii-uav-platform-renyike": "renyike",
"cmii-uav-platform-awareness": "awareness",
}
var FrontendClientIdMaps = map[string]string{
@@ -90,4 +91,5 @@ var FrontendClientIdMaps = map[string]string{
"cmii-uav-platform-dikongzhixingh5": "empty",
"cmii-uavms-platform-manager": "empty",
"cmii-uav-platform-renyike": "empty",
"cmii-uav-platform-awareness": "empty",
}
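These two maps drive the tenant-prefix objects seen earlier: the short name supplies both the ConfigMap suffix and ApplicationShortName, while the client-id map supplies AppClientId ("empty" when the app has no OAuth client). A sketch of that derivation, using an illustrative one-entry excerpt of the maps:

package main

import "fmt"

// one-entry excerpts of the maps added above (illustrative only)
var frontendShortNames = map[string]string{
	"cmii-uav-platform-awareness": "awareness",
}

var frontendClientIDs = map[string]string{
	"cmii-uav-platform-awareness": "empty",
}

func main() {
	// derive the "tenant-prefix-<short-name>" ConfigMap name for each app
	for app, short := range frontendShortNames {
		fmt.Printf("%s -> tenant-prefix-%s (AppClientId=%s)\n", app, short, frontendClientIDs[app])
	}
}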

View File

@@ -263,7 +263,7 @@ spec:
imagePullPolicy: Always
- name: oss-adaptor
{{- if .HarborPort }}
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/cmii-srs-oss-adaptor:2023-SA
image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
{{- else }}
image: {{ .HarborIPOrCustomImagePrefix }}cmii-srs-oss-adaptor:2023-SA-skip-CHL
{{- end }}

View File

@@ -81,7 +81,7 @@ func TestCmiiEnvDeploy_ShenZhenGuiZiXiangMu(t *testing.T) {
WebIP: "172.31.2.7",
WebPort: "8088",
HarborIPOrCustomImagePrefix: "172.31.2.7",
HarborPort: "8033/cmii",
HarborPort: "8033/admin",
Namespace: "szgz",
TagVersion: "6.2.0",
TenantEnv: "",
@@ -91,7 +91,7 @@ func TestCmiiEnvDeploy_ShenZhenGuiZiXiangMu(t *testing.T) {
ApplyFilePrefix: "",
}
CmiiEnvDeployOffline(commonEnv, true, real_project.Cmii611ImageList)
CmiiEnvDeployOffline(commonEnv, true, real_project.Cmii620ArmImageList)
}
@@ -198,6 +198,26 @@ func TestCmiiEnvDeploy_XinDEMO(t *testing.T) {
}
func TestCmiiEnvDeploy_JiangXiOldShengJi(t *testing.T) {
// Jiangxi legacy platform upgrade
// 2025-02-20
commonEnv := &z_dep.CommonEnvironmentConfig{
WebIP: "36.138.132.240",
WebPort: "30800",
HarborIPOrCustomImagePrefix: "10.20.1.130",
HarborPort: "8033",
Namespace: "jxyd",
TagVersion: "6.2.0",
TenantEnv: "",
MinioPublicIP: "",
MinioInnerIP: "10.20.1.132",
NFSServerIP: "10.20.1.132",
ApplyFilePrefix: "",
}
CmiiEnvDeployOffline(commonEnv, true, real_project.Cmii620ImageList)
}
func TestCmiiEnvDeploy_JiangXiTongGanErJiPingTai(t *testing.T) {
// Jiangxi Tonggan level-2 platform

View File

@@ -51,16 +51,12 @@ func TestPullFromEntityAndSyncConditionally(t *testing.T) {
sync := ImageSyncEntity{
DownloadCondition: &DownloadEntity{
ShouldDownloadImage: true,
ProjectName: "szgz_6.2.0",
ProjectVersion: "6.2.0-szgz-arm",
ProjectName: "6.2.0_demo",
ProjectVersion: "6.2.0-demo",
CmiiNameTagList: []string{
//"cmii-uav-mqtthandler:5.4.0-bjdyt-052102",
},
FullNameImageList: []string{
"harbor.cdcyy.com.cn/cmii/cmii-uav-mission:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-advanced5g:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:6.2.0-szgz-arm",
},
FullNameImageList: []string{},
DownloadAuthUserName: "",
DownloadAuthPassword: "",
},

View File

@@ -0,0 +1,17 @@
bitnami/redis:6.2.14-debian-11-r1
bitnami/mysql:8.1.0-debian-11-r42
simonrupf/chronyd:0.4.3
bitnami/bitnami-shell:11-debian-11-r136
bitnami/rabbitmq:3.11.26-debian-11-r2
ossrs/srs:v5.0.195
emqx/emqx:5.5.1
nacos/nacos-server:v2.1.2-slim
mongo:5.0
bitnami/minio:2023.5.4
kubernetesui/dashboard:v2.0.1
kubernetesui/metrics-scraper:v1.0.4
nginx:1.24.0
dyrnq/nfs-subdir-external-provisioner:v4.0.2
jerrychina2020/rke-tools:v0.175-linux
jerrychina2020/rke-tools:v0.175
busybox:latest

View File

@@ -1,25 +1,18 @@
bitnami/redis:6.2.6-debian-10-r0
bitnami/redis:6.2.14-debian-11-r1
bitnami/mysql:8.0.35-debian-11-r1
bitnami/mysql:8.1.0-debian-11-r42
simonrupf/chronyd:0.4.3
bitnami/bitnami-shell:10-debian-10-r140
bitnami/bitnami-shell:11-debian-11-r136
bitnami/rabbitmq:3.9.12-debian-10-r3
bitnami/rabbitmq:3.11.26-debian-11-r2
ossrs/srs:v4.0.136
ossrs/srs:v5.0.195
emqx/emqx:4.2.12
emqx/emqx:5.5.1
nacos/nacos-server:v2.1.2
nacos/nacos-server:v2.1.2-slim
mongo:5.0
rabbitmq:3.9-management
bitnami/minio:2022.5.4
bitnami/minio:2023.5.4
kubernetesui/dashboard:v2.0.1
kubernetesui/metrics-scraper:v1.0.4
nginx:1.21.3
redis:6.0.20-alpine
nginx:1.24.0
dyrnq/nfs-subdir-external-provisioner:v4.0.2
jerrychina2020/rke-tools:v0.175-linux
jerrychina2020/rke-tools:v0.175
busybox:latest

View File

@@ -4,7 +4,9 @@ import (
"agent-wdd/config"
"agent-wdd/log"
"agent-wdd/op"
"agent-wdd/utils"
"fmt"
"github.com/spf13/cobra"
)
@@ -15,6 +17,8 @@ var (
centosCommonTools = []string{
"deltarpm", "net-tools", "iputils", "bind-utils", "lsof", "curl", "wget", "vim", "mtr", "htop",
}
dockerLocalInstallPath = "/root/wdd/docker-20.10.15.tgz" // default path of the local Docker install archive
)
// register the base subcommands
@@ -77,7 +81,22 @@ func addDockerSubcommands(cmd *cobra.Command) {
Short: "网络安装Docker",
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
// cobra.ExactArgs(1) guarantees one argument: the Docker version to install
version := "26.0.7" // default version
if len(args) > 0 && args[0] != "" {
	version = args[0]
}
log.Info("Installing Docker version: %s", version)
// install Docker via the host package manager
// NOTE: the requested version is logged but not yet pinned in the package list below
packOperator := op.AgentPackOperator
packOperator.PackageInit()
// TODO: verify the install command is the same on Ubuntu and CentOS
packOperator.Install([]string{"docker-ce", "docker-ce-cli", "containerd.io", "docker-buildx-plugin", "docker-compose-plugin"})
},
}
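A typical invocation of this subcommand passes the version explicitly (hypothetical example; binary name as in the deploy script later in this diff):

./agent-wdd_linux_amd64 base docker online 26.0.7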
@@ -94,7 +113,14 @@ func addDockerSubcommands(cmd *cobra.Command) {
Short: "本地安装Docker",
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
// use the provided archive path, falling back to the default location
archive := dockerLocalInstallPath
if len(args) > 0 && args[0] != "" {
	archive = args[0]
}
fmt.Printf("Installing Docker from: %s\n", archive)
if !utils.FileExistAndNotNull(archive) {
	log.Error("Docker local install file not found: %s", archive)
	return
}
// extract the archive
utils.Unzip(archive, "/root/wdd")
},
}

View File

@@ -16,7 +16,7 @@ try {
Write-Host "4. Exec the command ..." -ForegroundColor Blue
Write-Host ""
Write-Host ""
ssh root@192.168.35.71 "chmod +x agent-wdd_linux_amd64 && ./agent-wdd_linux_amd64 base tools"
ssh root@192.168.35.71 "chmod +x agent-wdd_linux_amd64 && ./agent-wdd_linux_amd64 info disk"
Write-Host ""
Write-Host ""
Write-Host "5. Cheak Info Result ..." -ForegroundColor Blue

View File

@@ -53,6 +53,7 @@ func (op *PackageOperator) Install(tools []string) {
}
}
// PackageInit initializes the package manager: apt if the host is Ubuntu or Debian, yum otherwise
func (op *PackageOperator) PackageInit() {
log.Info("PackageInit !")
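A minimal sketch of the apt/yum detection that comment describes, assuming /etc/os-release is the source of truth; detectPackageManager is a hypothetical helper, not the repository's implementation:

package op

import (
	"os"
	"strings"
)

// detectPackageManager illustrates the choice PackageInit makes:
// apt on Ubuntu/Debian hosts, yum on everything else.
func detectPackageManager() string {
	data, err := os.ReadFile("/etc/os-release")
	if err != nil {
		return "yum" // assumption: fall back to yum when detection fails
	}
	osRelease := strings.ToLower(string(data))
	if strings.Contains(osRelease, "ubuntu") || strings.Contains(osRelease, "debian") {
		return "apt"
	}
	return "yum"
}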