Bulk update

This commit is contained in:
zeaslity
2025-02-21 14:00:16 +08:00
parent 20c4f57715
commit c5780f90d7
85 changed files with 50718 additions and 67 deletions

View File

@@ -16,7 +16,7 @@ all_server_list=(dev-worker-01 dev-worker-02 dev-worker-03 dev-worker-05 dev-wor
all_server_list=(192.168.35.105 192.168.35.114 192.168.35.115 192.168.35.55 192.168.35.86 192.168.35.89 192.168.35.93 192.168.35.95 192.168.35.96 192.168.35.101 192.168.35.103 192.168.35.104)
for server in "${all_server_list[@]}";do
echo "server is ${server}"
-    ssh root@"${server}" "echo root:V2ryStr@ngPss | sudo chpasswd"
+    ssh root@"${server}" "df -TH"
done

View File

@@ -0,0 +1,69 @@
#!/bin/bash
harbor_host=chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn
namespace=cqejpt
app_name=""
new_tag=""
upload_image_to_harbor(){
if [ "$app_name" == "" ]; then
echo "app name null exit!"
exit 233
fi
if ! docker load < "$1"; then
echo "docker load error !"
fi
docker tag "harbor.cdcyy.com.cn/cmii/$app_name:$new_tag" "$harbor_host/cmii/$app_name:$new_tag"
echo ""
echo ""
echo "upload_image_to_harbor - start to push to => $harbor_host/cmii/$app_name:$new_tag"
# docker login -u admin -p V2ryStr@ngPss $harbor_host
docker login --username=zgydtxjtcqyxgs18883257311 --password Dict@2024 chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn
docker push "$harbor_host/cmii/$app_name:$new_tag"
echo ""
echo ""
}
parse_args(){
if [ "$1" == "" ]; then
echo "no zip file in error!"
exit 233
fi
local image_name="$1"
# cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
app_name=$(echo "$image_name" | cut -d "=" -f1)
new_tag=$(echo "$image_name" | cut -d "=" -f2)
}
update_image_tag(){
if [ "$new_tag" == "" ]; then
echo "new tag error!"
exit 233
fi
local image_prefix=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)
echo "image grep is => ${image_prefix}"
echo "start to update ${namespace} ${app_name} to ${new_tag} !"
echo ""
kubectl -n ${namespace} patch deployment "${app_name}" -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${harbor_host}/cmii/$app_name:${new_tag}\"}]}}}}"
echo ""
echo "start to wait for 3 seconds!"
sleep 3
local image_new=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}')
echo ""
echo "new image are => $image_new"
echo ""
}
main(){
parse_args "$1"
upload_image_to_harbor "$1"
update_image_tag
}
main "$@"

View File

@@ -0,0 +1,3 @@
Shandong Mobile - Lin Yan messages

View File

@@ -3,7 +3,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
  name: cmii-uavms-platform-manager
-  namespace: uavcloud-devoperation
+  namespace: uavcloud-devflight
  labels:
    cmii.type: frontend
    cmii.app: cmii-uavms-platform-manager
@@ -32,7 +32,7 @@ spec:
        imagePullPolicy: Always
        env:
        - name: K8S_NAMESPACE
-          value: uavcloud-devoperation
+          value: uavcloud-devflight
        - name: APPLICATION_NAME
          value: cmii-uavms-platform-manager
        ports:
@@ -71,7 +71,7 @@ apiVersion: v1
kind: Service
metadata:
  name: cmii-uavms-platform-manager
-  namespace: uavcloud-devoperation
+  namespace: uavcloud-devflight
  labels:
    cmii.type: frontend
    cmii.app: cmii-uavms-platform-manager
@@ -92,7 +92,7 @@ kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-uasmanager
-  namespace: uavcloud-devoperation
+  namespace: uavcloud-devflight
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {

View File

@@ -0,0 +1,68 @@
#!/bin/bash
harbor_host=192.168.186.11:8033
namespace=ynydapp
app_name=""
new_tag=""
upload_image_to_harbor(){
if [ "$app_name" == "" ]; then
echo "app name null exit!"
exit 233
fi
if ! docker load < "$1"; then
echo "docker load error !"
fi
docker tag "harbor.cdcyy.com.cn/cmii/$app_name:$new_tag" "$harbor_host/cmii/$app_name:$new_tag"
echo ""
echo ""
echo "upload_image_to_harbor - start to push to => $harbor_host/cmii/$app_name:$new_tag"
docker login -u admin -p V2ryStr@ngPss $harbor_host
docker push "$harbor_host/cmii/$app_name:$new_tag"
echo ""
echo ""
}
parse_args(){
if [ "$1" == "" ]; then
echo "no zip file in error!"
exit 233
fi
local image_name="$1"
# cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
app_name=$(echo "$image_name" | cut -d "=" -f1)
new_tag=$(echo "$image_name" | cut -d "=" -f2)
}
update_image_tag(){
if [ "$new_tag" == "" ]; then
echo "new tag error!"
exit 233
fi
local image_prefix=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)
echo "image grep is => ${image_prefix}"
echo "start to update ${namespace} ${app_name} to ${new_tag} !"
echo ""
kubectl -n ${namespace} patch deployment "${app_name}" -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${harbor_host}/cmii/$app_name:${new_tag}\"}]}}}}"
echo ""
echo "start to wait for 3 seconds!"
sleep 3
local image_new=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}')
echo ""
echo "new image are => $image_new"
echo ""
}
main(){
parse_args "$1"
upload_image_to_harbor "$1"
update_image_tag
}
main "$@"

View File

@@ -0,0 +1,40 @@
#!/bin/bash
echo ""
echo ""
echo ""
echo "-----------------------------------------------------------------------"
export VG_NAME=datavg
echo "n
p
t
8e
w
" | fdisk /dev/vdb
partprobe
# If a volume group already exists, just extend it instead:
# vgextend rootvg /dev/vdb1
vgcreate ${VG_NAME} /dev/vdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# Adjust the size to the actual environment
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
# mkfs.ext4 /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
mkdir -p /var/lib/docker
#selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
export selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"

View File

@@ -0,0 +1,168 @@
#!/bin/bash
all_image_list_txt="all-cmii-image-list.txt" # update the version as needed
gzip_image_list_txt="all-gzip-image-list.txt" # usually no change needed
oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
local_gzip_path="/root/octopus-image"
DockerRegisterDomain="10.20.1.130:8033" # adjust to the actual environment
HarborAdminPass=V2ryStr@ngPss # must match the password in the first script
print_green() {
echo -e "\033[32m${1}\033[0m"
echo ""
}
print_red() {
echo -e "\033[31m${1}\033[0m"
echo ""
}
Download_Load_Tag_Upload() {
print_green "[DLTU] - start !"
while [[ $# -gt 0 ]]; do
case "$1" in
rke)
# print_green "download rke "
local_gzip_path="$local_gzip_path/rke"
mkdir -p ${local_gzip_path}
oss_prefix_url="$oss_prefix_url/rke/"
dltu
shift # past argument
;;
middle)
local_gzip_path="$local_gzip_path/middle"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/middle/"
dltu
shift # past argument
;;
cmii)
local_gzip_path="$local_gzip_path/cmii"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/6.2.0-demo/"
dltu
shift # past argument
;;
*)
# unknown option
print_red "bad arguments"
;;
esac
done
}
dltu() {
print_green "download all image name list and gzip file list!"
cd $local_gzip_path || exit
rm -f $all_image_list_txt
rm -f $gzip_image_list_txt
wget "$oss_prefix_url$all_image_list_txt"
wget "$oss_prefix_url$gzip_image_list_txt"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
echo ""
while IFS= read -r i; do
[ -z "${i}" ] && continue
echo "download gzip file =>: $oss_prefix_url${i}"
if wget "$oss_prefix_url${i}" >/dev/null 2>&1; then
echo "Gzip file download success : ${i}"
image_full_name=$(docker load -i ${i} | head -n1 |awk -F': ' '{print $2}')
app_name=$(echo "$image_full_name" | sed 's|.*/||g')
echo "extract short name is $app_name"
if echo $image_full_name | grep -q "rancher"
then
print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
docker push $DockerRegisterDomain/rancher/$app_name
else
print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
docker push $DockerRegisterDomain/cmii/$app_name
fi
else
print_red "Gzip file download FAILED : ${i}"
fi
echo "-------------------------------------------------"
done <"${gzip_image_list_txt}"
}
Load_Tag_Upload(){
print_green "[LTU] - start to load image from offline !"
while [[ $# -gt 0 ]]; do
case "$1" in
rke)
# print_green "download rke "
local_gzip_path="$local_gzip_path/rke"
mkdir -p ${local_gzip_path}
oss_prefix_url="$oss_prefix_url/rke/"
ltu
shift # past argument
;;
middle)
local_gzip_path="$local_gzip_path/middle"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/middle/"
ltu
shift # past argument
;;
cmii)
local_gzip_path="$local_gzip_path/6.1.1"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/6.1.1/"
ltu
shift # past argument
;;
*)
# unknown option
print_red "bad arguments"
;;
esac
done
}
ltu(){
all_file_list=$(find $local_gzip_path -type f -name "*.tar.gz")
for file in $all_file_list; do
echo "offline gzip file is => : $file"
image_full_name=$(docker load -i ${file} | head -n1 |awk -F': ' '{print $2}')
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
app_name=$(echo "$image_full_name" | sed 's|.*/||g')
echo "extract short name is $app_name"
if echo $image_full_name | grep -q "rancher"
then
print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
docker push $DockerRegisterDomain/rancher/$app_name
else
print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
docker push $DockerRegisterDomain/cmii/$app_name
fi
done
}
test(){
app_name=$(echo "nginx:latest" | sed 's|.*/||g')
echo "extract short name is $app_name"
}
# test
Download_Load_Tag_Upload "cmii"
# Load_Tag_Upload "cmii"

File diff suppressed because it is too large

View File

@@ -0,0 +1,209 @@
nodes:
- address: 10.20.1.130
user: root
role:
- controlplane
- etcd
- worker
internal_address: 10.20.1.130
labels:
ingress-deploy: true
- address: 10.20.1.133
user: root
role:
- worker
internal_address: 10.20.1.133
- address: 10.20.1.134
user: root
role:
- worker
internal_address: 10.20.1.134
labels:
mysql-deploy: true
- address: 10.20.1.141
user: root
role:
- worker
internal_address: 10.20.1.141
labels:
ingress-deploy: true
- address: 10.20.1.142
user: root
role:
- worker
internal_address: 10.20.1.142
labels:
ingress-deploy: true
- address: 10.20.1.144
user: root
role:
- worker
internal_address: 10.20.1.144
labels:
ingress-deploy: true
- address: 10.20.1.145
user: root
role:
- worker
internal_address: 10.20.1.145
labels:
ingress-deploy: true
authentication:
strategy: x509
sans:
- "10.20.1.130"
private_registries:
- url: 10.20.1.130:8033 # private registry address
user: admin
password: "V2ryStr@ngPss"
is_default: true
##############################################################################
# Defaults to false; if set to true, RKE will not error out when an unsupported Docker version is found
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
ssh_key_path: /root/.ssh/id_ed25519
# ssh_key_path: /root/.ssh/id_rsa
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
etcd:
backup_config:
enabled: false
interval_hours: 72
retention: 3
safe_timestamp: false
timeout: 300
creation: 12h
extra_args:
election-timeout: 5000
heartbeat-interval: 500
gid: 0
retention: 72h
snapshot: false
uid: 0
kube-api:
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-controller
service_cluster_ip_range: 172.24.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
always_pull_images: true
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Enable audit log to stdout
audit-log-path: "-"
# Increase number of delete workers
delete-collection-workers: 3
# Set the level of log output to warning-level
v: 1
kube-controller:
# CIDR pool used to assign IP addresses to pods in the cluster
cluster_cidr: 172.28.0.0/16
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-api
service_cluster_ip_range: 172.24.0.0/16
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Set the level of log output to debug-level
v: 1
# Enable RotateKubeletServerCertificate feature gate
feature-gates: RotateKubeletServerCertificate=true
# Enable TLS Certificates management
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
kubelet:
# Base domain for the cluster
cluster_domain: cluster.local
# IP address for the DNS service endpoint
cluster_dns_server: 172.24.0.10
# Fail if swap is on
fail_swap_on: false
# Set max pods to 250 instead of default 110
extra_binds:
- "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
extra_args:
max-pods: 122
# Optionally define additional volume binds to a service
scheduler:
extra_args:
# Set the level of log output to warning-level
v: 0
kubeproxy:
extra_args:
# Set the level of log output to warning-level
v: 1
authorization:
mode: rbac
addon_job_timeout: 30
# Specify network plugin-in (canal, calico, flannel, weave, or none)
network:
options:
flannel_backend_type: vxlan
flannel_iface: eth0
flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
plugin: flannel
# Specify DNS provider (coredns or kube-dns)
dns:
provider: coredns
nodelocal: {}
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
maxSurge: 15%
linear_autoscaler_params:
cores_per_replica: 0.34
nodes_per_replica: 4
prevent_single_point_failure: true
min: 2
max: 3
# Specify monitoring provider (metrics-server)
monitoring:
provider: metrics-server
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 8
ingress:
provider: nginx
default_backend: true
http_port: 0
https_port: 0
extra_envs:
- name: TZ
value: Asia/Shanghai
node_selector:
ingress-deploy: true
options:
use-forwarded-headers: "true"

View File

@@ -0,0 +1,40 @@
rm octopus-agent_linux_amd64
wget http://42.192.52.227:9000/octopus/octopus-agent_linux_amd64
host_list=(10.129.80.217)
host_list=(10.20.1.133 10.20.1.134)
host_list=(10.20.1.141 10.20.1.142 10.20.1.144 10.20.1.145)
for server in "${host_list[@]}";do
echo "current ip is $server"
# scp /home/app/octopus-agent_linux_amd64 app@${server}:/home/app/octopus-agent_linux_amd64
# scp /root/wdd/disk.sh root@${server}:/root/wdd/
# ssh root@${server} "bash /root/wdd/disk.sh && lsblk"
# scp /root/wdd/docker-amd64-20.10.15.tgz root@${server}:/root/wdd/docker-amd64-20.10.15.tgz
# scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${server}:/root/wdd/
# ssh root@${server} "printf 'firewall\n' | octopus-agent --mode=bastion"
# ssh root@${server} "printf 'sysconfig\n' | octopus-agent --mode=bastion"
# ssh root@${server} "printf 'swap\n' | octopus-agent --mode=bastion"
# ssh root@${server} "printf 'selinux\n' | octopus-agent --mode=bastion"
# ssh root@${server} "printf 'docker\n' | octopus-agent --mode=bastion"
# ssh root@${server} "printf 'dockercompose\n' | octopus-agent --mode=bastion"
# scp /etc/docker/daemon.json root@${server}:/etc/docker/daemon.json
# ssh root@${server} "systemctl restart docker"
# ssh root@${server} "docker info"
ssh root@${server} "yum install -y nfs-utils && systemctl enable nfs-client"
done
rm -f /usr/local/bin/octopus-agent
mv -f /home/app/octopus-agent_linux_amd64 /usr/local/bin/octopus-agent
chmod +x /usr/local/bin/octopus-agent
printf 'sshkey\n' | octopus-agent --mode=bastion
printf 'sshconfig\n' | octopus-agent --mode=bastion
# Modify to allow logging in as root
cp /etc/ssh/sshd_config /etc/ssh/sshd_config_back
sed -i

View File

@@ -0,0 +1,147 @@
#!/usr/bin/env bash
### Modify the content below ###
#### Modify the content below ###
#### Modify the content below ###
cmlc_app_image_list="cmlc-app-images-4.1.6.txt" # update the version as needed
rancher_image_list="kubernetes-images-2.5.7-1.20.4.txt" # usually no change needed
middleware_image_list="middleware-images.txt" # usually no change needed
#DockerRegisterDomain="20.47.129.116:8033" # adjust to the actual environment
DockerRegisterDomain="172.31.2.7:8033/admin" # adjust to the actual environment
HarborAdminPass=V2ryStr@ngPss # must match the password in the first script
#### Modify the content above ###
#### Modify the content above ###
#### Modify the content above ###
downloadAllNeededImages() {
while [[ $# -gt 0 ]]; do
pulled=""
while IFS= read -r i; do
[ -z "${i}" ] && continue
echo "开始下载:${i}"
if docker pull "${i}" >/dev/null 2>&1; then
echo "Image pull success: ${i}"
pulled="${pulled} ${i}"
else
if docker inspect "${i}" >/dev/null 2>&1; then
pulled="${pulled} ${i}"
else
echo "Image pull failed: ${i}"
fi
fi
echo "-------------------------------------------------"
done <"${1}"
shift
done
}
downloadAllNeededImagesAndCompress() {
while [[ $# -gt 0 ]]; do
pulled=""
while IFS= read -r i; do
[ -z "${i}" ] && continue
echo "开始下载:${i}"
if docker pull "${i}" >/dev/null 2>&1; then
echo "Image pull success: ${i}"
pulled="${pulled} ${i}"
else
if docker inspect "${i}" >/dev/null 2>&1; then
pulled="${pulled} ${i}"
else
echo "Image pull failed: ${i}"
fi
fi
echo "-------------------------------------------------"
done <"${1}"
compressPacName="$(echo ${1} | cut -d"." -f1).tar.gz"
echo "Creating ${compressPacName} with $(echo ${pulled} | wc -w | tr -d '[:space:]') images"
docker save $(echo ${pulled}) | gzip --stdout > ${compressPacName}
shift
done
echo "已经完成打包工作!"
}
pushRKEImageToHarbor(){
linux_images=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
linux_images+=("${i}");
done < "${rancher_image_list}"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
for i in "${linux_images[@]}"; do
[ -z "${i}" ] && continue
case $i in
*/*)
image_name="${DockerRegisterDomain}/${i}"
;;
*)
image_name="${DockerRegisterDomain}/rancher/${i}"
;;
esac
echo "开始镜像至私有仓库推送:${image_name}"
docker tag "${i}" "${image_name}"
docker push "${image_name}"
echo "-------------------------------------------------"
done
}
pushCMLCAPPImageToHarbor(){
app_images=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
app_images+=("${i}");
done < "${cmlc_app_image_list}"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
for app in "${app_images[@]}"; do
[ -z "${app}" ] && continue
image_name="${DockerRegisterDomain}/$(echo ${app} | cut -d"/" -f2-8)"
echo "开始镜像至私有仓库推送:${image_name}"
docker tag "${app}" "${image_name}"
docker push "${image_name}"
echo "-------------------------------------------------"
done
}
pushMiddlewareImageToHarbor(){
middleware_image=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
middleware_image+=("${i}");
done < "${middleware_image_list}"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
for app in "${middleware_image[@]}"; do
[ -z "${app}" ] && continue
case ${app} in
*/*/*)
image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f3-8)"
;;
*/*)
image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f2-8)"
;;
esac
echo "开始镜像至私有仓库推送:${image_name}"
docker tag "${app}" "${image_name}"
docker push "${image_name}"
echo "-------------------------------------------------"
done
}
#downloadAllNeededImagesAndCompress "${middleware_image_list}"
# downloadAllNeededImages "${rancher_image_list}"
pushRKEImageToHarbor
#pushCMLCAPPImageToHarbor
#pushMiddlewareImageToHarbor
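As committed, only pushRKEImageToHarbor runs. A sketch of the intended two-phase use, per the function names and comments (no new commands, just which calls to enable where):

# online host: pull everything and produce one tar.gz per list file
#   downloadAllNeededImagesAndCompress "${rancher_image_list}"
# offline Harbor host: after docker load, push each set
#   pushRKEImageToHarbor
#   pushCMLCAPPImageToHarbor
#   pushMiddlewareImageToHarbor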

View File

@@ -0,0 +1,168 @@
#!/bin/bash
all_image_list_txt="all-cmii-image-list.txt" # update the version as needed
gzip_image_list_txt="all-gzip-image-list.txt" # usually no change needed
oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
local_gzip_path="/root/wdd/octopus_image"
DockerRegisterDomain="172.31.2.7:8033/admin" # adjust to the actual environment
HarborAdminPass=V2ryStr@ngPss # must match the password in the first script
print_green() {
echo -e "\033[32m${1}\033[0m"
echo ""
}
print_red() {
echo -e "\033[31m${1}\033[0m"
echo ""
}
Download_Load_Tag_Upload() {
print_green "[DLTU] - start !"
while [[ $# -gt 0 ]]; do
case "$1" in
rke)
# print_green "download rke "
local_gzip_path="$local_gzip_path/rke"
mkdir -p ${local_gzip_path}
oss_prefix_url="$oss_prefix_url/rke/"
dltu
shift # past argument
;;
middle)
local_gzip_path="$local_gzip_path/middle"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/middle/"
dltu
shift # past argument
;;
cmii)
local_gzip_path="$local_gzip_path/cmii"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/ahejpt/"
dltu
shift # past argument
;;
*)
# unknown option
print_red "bad arguments"
;;
esac
done
}
dltu() {
print_green "download all image name list and gzip file list!"
cd $local_gzip_path || exit
rm -f $all_image_list_txt
rm -f $gzip_image_list_txt
wget "$oss_prefix_url$all_image_list_txt"
wget "$oss_prefix_url$gzip_image_list_txt"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
echo ""
while IFS= read -r i; do
[ -z "${i}" ] && continue
echo "download gzip file =>: $oss_prefix_url${i}"
if wget "$oss_prefix_url${i}" >/dev/null 2>&1; then
echo "Gzip file download success : ${i}"
image_full_name=$(docker load -i ${i} | head -n1 |awk -F': ' '{print $2}')
app_name=$(echo "$image_full_name" | sed 's|.*/||g')
echo "extract short name is $app_name"
if echo $image_full_name | grep -q "rancher"
then
print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
docker push $DockerRegisterDomain/rancher/$app_name
else
print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
docker push $DockerRegisterDomain/cmii/$app_name
fi
else
print_red "Gzip file download FAILED : ${i}"
fi
echo "-------------------------------------------------"
done <"${gzip_image_list_txt}"
}
Load_Tag_Upload(){
print_green "[LTU] - start to load image from offline !"
while [[ $# -gt 0 ]]; do
case "$1" in
rke)
# print_green "download rke "
local_gzip_path="$local_gzip_path/rke"
mkdir -p ${local_gzip_path}
oss_prefix_url="$oss_prefix_url/rke/"
ltu
shift # past argument
;;
middle)
local_gzip_path="$local_gzip_path/middle"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/middle/"
ltu
shift # past argument
;;
cmii)
local_gzip_path="$local_gzip_path/cmii"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/cmii/"
ltu
shift # past argument
;;
*)
# unknown option
print_red "bad arguments"
;;
esac
done
}
ltu(){
all_file_list=$(find $local_gzip_path -type f -name "*.tar.gz")
for file in $all_file_list; do
echo "offline gzip file is => : $file"
image_full_name=$(docker load -i ${file} | head -n1 |awk -F': ' '{print $2}')
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
app_name=$(echo "$image_full_name" | sed 's|.*/||g')
echo "extract short name is $app_name"
if echo $image_full_name | grep -q "rancher"
then
print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
docker push $DockerRegisterDomain/rancher/$app_name
else
print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
docker push $DockerRegisterDomain/cmii/$app_name
fi
done
}
test(){
app_name=$(echo "nginx:latest" | sed 's|.*/||g')
echo "extract short name is $app_name"
}
# test
#Download_Load_Tag_Upload "rke"
Load_Tag_Upload "cmii"

View File

@@ -0,0 +1,49 @@
#!/bin/bash
# Turn off swap
swapoff -a
cp -f /etc/fstab /etc/fstab_bak
grep -v swap /etc/fstab_bak >/etc/fstab
yum install lvm2 -y
echo ""
echo ""
echo ""
echo "-----------------------------------------------------------------------"
export VG_NAME=datavg
echo "n
p
t
8e
w
" | fdisk /dev/vdb
partprobe
# If a volume group already exists, just extend it instead:
# vgextend rootvg /dev/vdb1
vgcreate ${VG_NAME} /dev/vdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# Adjust the size to the actual environment
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
# mkfs.ext4 /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
mkdir -p /var/lib/docker
#selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
export selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"
lsblk

View File

@@ -0,0 +1,49 @@
hostname: 172.31.2.7
http:
port: 8033
harbor_admin_password: V2ryStr@ngPss
database:
password: V2ryStr@ngPss
max_idle_conns: 50
max_open_conns: 1000
conn_max_lifetime: 3600
conn_max_idle_time: 3600
data_volume: /var/lib/docker/harbor-data
jobservice:
max_job_workers: 10
job_loggers:
- STD_OUTPUT
- FILE
logger_sweeper_duration: 3
notification:
webhook_job_max_retry: 10
webhook_job_http_client_timeout: 10
log:
level: warning
local:
rotate_count: 50
rotate_size: 200M
location: /var/log/harbor
cache:
enabled: false
expire_hours: 24
_version: 2.9.0
proxy:
http_proxy:
https_proxy:
no_proxy:
components:
- core
- jobservice
- trivy
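This harbor.yml is consumed by Harbor's offline installer. A minimal sketch of applying it, assuming the installer tarball referenced elsewhere in this commit:

tar xzf harbor-offline-installer-*.tgz && cd harbor
cp ../harbor.yml .        # the configuration above
./install.sh --with-trivy # trivy appears under components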

View File

@@ -0,0 +1,63 @@
busybox
rancher/backup-restore-operator:v1.0.3
rancher/calico-cni:v3.17.2
rancher/calico-ctl:v3.17.2
rancher/calico-kube-controllers:v3.17.2
rancher/calico-pod2daemon-flexvol:v3.17.2
rancher/cis-operator:v1.0.3
rancher/cluster-proportional-autoscaler:1.7.1
rancher/coredns-coredns:1.8.0
rancher/coreos-etcd:v3.4.14-rancher1
rancher/coreos-kube-state-metrics:v1.9.7
rancher/coreos-prometheus-config-reloader:v0.39.0
rancher/coreos-prometheus-operator:v0.39.0
rancher/externalip-webhook:v0.1.6
rancher/flannel-cni:v0.3.0-rancher6
rancher/coreos-flannel:v0.13.0-rancher1
rancher/fleet-agent:v0.3.4
rancher/fleet:v0.3.4
rancher/fluentd:v0.1.24
rancher/grafana-grafana:7.1.5
rancher/hyperkube:v1.20.4-rancher1
rancher/jimmidyson-configmap-reload:v0.3.0
rancher/k8s-dns-dnsmasq-nanny:1.15.2
rancher/k8s-dns-kube-dns:1.15.2
rancher/k8s-dns-node-cache:1.15.13
rancher/k8s-dns-sidecar:1.15.2
rancher/klipper-lb:v0.1.2
rancher/kube-api-auth:v0.1.4
rancher/kubectl:v1.20.4
rancher/kubernetes-external-dns:v0.7.3
rancher/cluster-proportional-autoscaler:1.8.1
rancher/library-busybox:1.31.1
rancher/library-busybox:1.32.1
rancher/library-nginx:1.19.2-alpine
rancher/library-traefik:1.7.19
rancher/local-path-provisioner:v0.0.11
rancher/local-path-provisioner:v0.0.14
rancher/local-path-provisioner:v0.0.19
rancher/log-aggregator:v0.1.7
rancher/istio-kubectl:1.5.10
rancher/metrics-server:v0.4.1
rancher/configmap-reload:v0.3.0-rancher4
rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
rancher/nginx-ingress-controller:nginx-0.43.0-rancher1
rancher/opa-gatekeeper:v3.1.0-beta.7
rancher/openzipkin-zipkin:2.14.2
rancher/pause:3.2
rancher/plugins-docker:18.09
rancher/prom-alertmanager:v0.21.0
rancher/prom-node-exporter:v1.0.1
rancher/prom-prometheus:v2.18.2
rancher/prometheus-auth:v0.2.1
rancher/rancher-agent:v2.5.7
rancher/rancher-webhook:v0.1.0-beta9
rancher/rancher:v2.5.7
rancher/rke-tools:v0.1.72
rancher/security-scan:v0.1.14
rancher/security-scan:v0.2.2
rancher/shell:v0.1.6
rancher/sonobuoy-sonobuoy:v0.16.3
rancher/system-upgrade-controller:v0.6.2

View File

@@ -0,0 +1,213 @@
nodes:
- address: 172.31.2.7
user: root
role:
- controlplane
- etcd
- worker
internal_address: 172.31.2.7
labels:
ingress-deploy: true
- address: 172.31.2.8
user: root
role:
- worker
internal_address: 172.31.2.8
labels:
ingress-deploy: true
- address: 172.31.2.9
user: root
role:
- worker
internal_address: 172.31.2.9
labels:
ingress-deploy: true
mysql-deploy: true
authentication:
strategy: x509
sans:
- "172.31.2.7"
private_registries:
- url: 172.31.2.7:8033/admin # private registry address
user: admin
password: "V2ryStr@ngPss"
is_default: true
##############################################################################
# Defaults to false; if set to true, RKE will not error out when an unsupported Docker version is found
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
ssh_key_path: /root/.ssh/id_ed25519
#ssh_key_path: /root/.ssh/id_rsa
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
etcd:
backup_config:
enabled: false
interval_hours: 72
retention: 3
safe_timestamp: false
timeout: 300
creation: 12h
extra_args:
election-timeout: 5000
heartbeat-interval: 500
gid: 0
retention: 72h
snapshot: false
uid: 0
kube-api:
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-controller
service_cluster_ip_range: 10.24.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
always_pull_images: true
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Enable audit log to stdout
audit-log-path: "-"
# Increase number of delete workers
delete-collection-workers: 3
# Set the level of log output to warning-level
v: 1
kube-controller:
# CIDR pool used to assign IP addresses to pods in the cluster
cluster_cidr: 10.28.0.0/16
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-api
service_cluster_ip_range: 10.24.0.0/16
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Set the level of log output to debug-level
v: 1
# Enable RotateKubeletServerCertificate feature gate
feature-gates: RotateKubeletServerCertificate=true
# Enable TLS Certificates management
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
kubelet:
# Base domain for the cluster
cluster_domain: cluster.local
# IP address for the DNS service endpoint
cluster_dns_server: 10.24.0.10
# Fail if swap is on
fail_swap_on: false
# Set max pods to 250 instead of default 110
extra_binds:
- "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
extra_args:
max-pods: 122
# Optionally define additional volume binds to a service
scheduler:
extra_args:
# Set the level of log output to warning-level
v: 0
kubeproxy:
extra_args:
# Set the level of log output to warning-level
v: 1
authorization:
mode: rbac
addon_job_timeout: 30
# Specify network plugin-in (canal, calico, flannel, weave, or none)
network:
options:
flannel_backend_type: vxlan
flannel_iface: enp4s0
flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
plugin: flannel
# Specify DNS provider (coredns or kube-dns)
dns:
provider: coredns
nodelocal: {}
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
maxSurge: 15%
linear_autoscaler_params:
cores_per_replica: 0.34
nodes_per_replica: 4
prevent_single_point_failure: true
min: 2
max: 3
# Specify monitoring provider (metrics-server)
monitoring:
provider: metrics-server
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 8
ingress:
provider: nginx
default_backend: true
http_port: 0
https_port: 0
extra_envs:
- name: TZ
value: Asia/Shanghai
node_selector:
ingress-deploy: true
options:
use-forwarded-headers: "true"
access-log-path: /var/log/nginx/access.log
client-body-timeout: '6000'
compute-full-forwarded-for: 'true'
enable-underscores-in-headers: 'true'
log-format-escape-json: 'true'
log-format-upstream: >-
{ "msec": "$msec", "connection": "$connection", "connection_requests":
"$connection_requests", "pid": "$pid", "request_id": "$request_id",
"request_length": "$request_length", "remote_addr": "$remote_addr",
"remote_user": "$remote_user", "remote_port": "$remote_port",
"http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
"$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
"request_uri": "$request_uri", "args": "$args", "status": "$status",
"body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
"http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
"http_host": "$http_host", "server_name": "$server_name", "request_time":
"$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
"$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
"upstream_response_time": "$upstream_response_time",
"upstream_response_length": "$upstream_response_length",
"upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
"$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
"request_method": "$request_method", "server_protocol": "$server_protocol",
"pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
"geoip_country_code": "$geoip_country_code" }
proxy-body-size: 5120m
proxy-read-timeout: '6000'
proxy-send-timeout: '6000'
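A sketch of bringing the cluster up from this file, assuming it is saved as cluster.yml beside the rke binary listed in the download notes in this commit:

./rke up --config cluster.yml
export KUBECONFIG=$PWD/kube_config_cluster.yml  # kubeconfig written by rke
kubectl get nodes -o wide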

View File

@@ -0,0 +1,9 @@
#!/bin/bash
sed -i "s/AllowTcpForwarding no/AllowTcpForwarding yes/g" /etc/ssh/sshd_config
sed -i "s/AllowAgentForwarding no/AllowAgentForwarding yes/g" /etc/ssh/sshd_config
sed -i "s/StrictModes yes/StrictModes no/g" /etc/ssh/sshd_config
systemctl restart sshd
hostnamectl set-hostname 172-31-2-8
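A hedged sanity check before trusting the edited sshd_config:

sshd -t   # validate syntax; no output means OK
sshd -T | grep -Ei 'allowtcpforwarding|allowagentforwarding|strictmodes'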

View File

@@ -0,0 +1,16 @@
https://172.31.2.7:39999
eyJhbGciOiJSUzI1NiIsImtpZCI6IkQ2SExTeWxObkxTMllFQ0Z5ZTMwakpaS01FbHFnSzc5RWY0cVJVVXNfOGcifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWxrNjRnIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkZmM1YWE4NC05NWRlLTRjOTUtYmVmNi1jOWNlYjdjMzc3ZjQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.JOyeNRJHdOMeP1PRfi6x0LMfu6XiuSEIXTVRF0T2xNzv9thdEzjbFZA9j6GI4fWyKZb4IO0xABzjVbkYRW_KIUSosDT0QzngqIq6L1O3gGBgzDKlGpBNFvrbxZgwxcJir_piSP23uvOlMSW9uv4GMLEZixcwNmy_zVVwPbEtrBLSGWwZK9nfa2tYpWesUO70OdWXdP7lW6MRN4i_8RD37-85RY_YD_pYAYGiJtf9cIW6ao630a9RO7nuCDdUuNa9REjYUQqt9ksS_PL277k27e1UBV0yBxKYMxpwhqD4QshfXBMHw56K5F9syGdP7NosBmFmtcHCblEb5nNTHzatJw
journalctl -u nfs-server -n 100 -f
journalctl -u nfs-idmapd.service -n 100 -f
systemctl status nfs-server
docker image ls | grep redis
docker tag bitnami/redis:6.2.6-debian-10-r0 172.31.2.7:8033/admin/cmii/redis:6.2.6-debian-10-r0
docker push 172.31.2.7:8033/admin/cmii/redis:6.2.6-debian-10-r0
docker image inspect 172.31.2.7:8033/admin/cmii/rabbitmq:3.9.12-debian-10-r3

View File

@@ -0,0 +1,31 @@
yum remove docker net-tools iputils bind-utils lsof curl wget vim mtr htop
mv octopus-agent_linux_arm64 /usr/local/bin/octopus-agent
chmod +x /usr/local/bin/octopus-agent
host_list=(172.31.2.7 172.31.2.8 172.31.2.9)
host_list=(172.31.2.8 172.31.2.9)
for server in "${host_list[@]}";do
echo "current ip is $server"
# ssh root@${server} "printf 'firewall\n' | octopus-agent --mode=bastion"
# ssh root@${server} "printf 'sysconfig\n' | octopus-agent --mode=bastion"
# ssh root@${server} "printf 'swap\n' | octopus-agent --mode=bastion"
# ssh root@${server} "printf 'selinux\n' | octopus-agent --mode=bastion"
# ssh root@${server} "printf 'sshkey\n' | octopus-agent --mode=bastion"
# ssh root@${server} "printf 'docker\n' | octopus-agent --mode=bastion"
#ssh root@${server} "mkdir /root/wdd"
# scp /root/wdd/disk.sh root@${server}:/root/wdd/
# ssh root@${server} "bash /root/wdd/disk.sh && lsblk"
# ssh root@${server} "yum install -y rpcbind net-tools iputils bind-utils lsof curl wget vim mtr htop docker docker-compose"
scp /etc/docker/daemon.json root@${server}:/etc/docker/
ssh root@${server} "systemctl restart docker && sleep 3 && docker info"
done
docker container stop $(docker ps -qa)
printf "y/n" | docker container prune
docker image rmi $(docker image ls -qa) --force

View File

@@ -0,0 +1,29 @@
https://oss.demo.uavcmlc.com/cmlc-installation/arm64/harbor-offline-installer-aarch64-v2.12.2.tgz
https://oss.demo.uavcmlc.com/cmlc-installation/arm64/harbor-offline-installer-aarch64-v2.12.2.tgz
https://oss.demo.uavcmlc.com/cmlc-installation/arm64/middleware-images.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/arm64/middleware-images-arm64.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/arm64/ts2mp4_docker_image_arm64_v1.0.0.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/arm64/docker_release_ts2mp4_arm64.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/arm64/go1.23.5.linux-arm64.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/arm64/harbor-offline-installer-aarch64-v2.12.2.tgz
https://oss.demo.uavcmlc.com/cmlc-installation/arm64/mino-arm-RELEASE.2023-06-02T23-17-26Z.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/arm64/kubernetes-rke-tools.75.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/arm64/kubectl-1.20.4-arm64
https://oss.demo.uavcmlc.com/cmlc-installation/arm64/rke_linux-arm64
mc cp cmlc=cmii=cmii-uav-sense-adapter=6.2.0-szgz-arm.tar.gz demo/cmlc-installation/6.2.0-szgz-arm
mc cp cmlc=cmii=cmii-uav-advanced5g=6.2.0-szgz-arm.tar.gz demo/cmlc-installation/6.2.0-szgz-arm
mc cp cmlc=cmii=cmii-uav-mission=6.2.0-szgz-arm.tar.gz demo/cmlc-installation/6.2.0-szgz-arm
demo/cmlc-installation/6.2.0-szgz-arm
mc ls demo/cmlc-installation/arm64
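The mc commands above assume an alias named demo for the OSS endpoint; a sketch of defining it (the access and secret keys are placeholders):

mc alias set demo https://oss.demo.uavcmlc.com <ACCESS_KEY> <SECRET_KEY>
mc ls demo/cmlc-installation/arm64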

File diff suppressed because it is too large

View File

@@ -0,0 +1,602 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-secenter
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "secenter",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-scanner
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "scanner",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-renyike
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "renyike",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-blockchain
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "blockchain",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uavmsmanager
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "uavmsmanager",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "hyper",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dikongzhixingh5
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "dikongzhixingh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: szgz
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "172.31.2.7:8088",
ApplicationShortName: "smauth",
AppClientId: "empty"
}

View File

@@ -0,0 +1,309 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 39999
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: kubernetes-dashboard
image: 172.31.2.7:8033/admin/cmii/dashboard:v2.0.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: 172.31.2.7:8033/admin/cmii/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
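The dashboard login token pasted in the notes earlier in this commit belongs to this admin-user ServiceAccount; on a pre-1.24 cluster like this v1.20 one, it can be read from the auto-created secret:

kubectl -n kube-system get secret \
  $(kubectl -n kube-system get sa admin-user -o jsonpath='{.secrets[0].name}') \
  -o jsonpath='{.data.token}' | base64 -d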

View File

@@ -0,0 +1,277 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: szgz
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: szgz
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
data:
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
EMQX_CLUSTER__K8S__namespace: "szgz"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: szgz
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
data:
emqx_auth_mnesia.conf: |-
auth.mnesia.password_hash = sha256
    # clientid auth entries
# auth.client.1.clientid = admin
# auth.client.1.password = odD8#Ve7.B
# auth.client.2.clientid = cmlc
# auth.client.2.password = odD8#Ve7.B
    ## username auth entries
auth.user.1.username = admin
auth.user.1.password = odD8#Ve7.B
auth.user.2.username = cmlc
auth.user.2.password = odD8#Ve7.B
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
    {emqx_auth_mnesia,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: szgz
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
affinity: {}
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: 172.31.2.7:8033/admin/cmii/emqx:4.4.19
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: {}
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf"
subPath: emqx_auth_mnesia.conf
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/acl.conf"
subPath: "acl.conf"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
emptyDir: {}
# persistentVolumeClaim:
# claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_mnesia.conf
path: emqx_auth_mnesia.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: szgz
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: szgz
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: szgz
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: szgz
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: szgz
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
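
A minimal smoke test for the EMQX node above (a sketch; <node-ip> is a placeholder for any worker node, and mosquitto-clients is assumed on the test host — the admin user and the admin/# ACL come from helm-emqxs-cm):

kubectl -n szgz exec helm-emqxs-0 -- emqx_ctl cluster status
mosquitto_pub -h <node-ip> -p 31883 -u admin -P 'odD8#Ve7.B' -t admin/test -m hello
# dashboard: http://<node-ip>:38085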

File diff suppressed because it is too large

View File

@@ -0,0 +1,730 @@
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: szgz
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/supervisionh5)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/blockchain)$ $1/ redirect;
rewrite ^(/classification)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/dikongzhixingh5)$ $1/ redirect;
rewrite ^(/dispatchh5)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/eventsh5)$ $1/ redirect;
rewrite ^(/hljtt)$ $1/ redirect;
rewrite ^(/hyper)$ $1/ redirect;
rewrite ^(/jiangsuwenlv)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/mianyangbackend)$ $1/ redirect;
rewrite ^(/multiterminal)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/pilot2cloud)$ $1/ redirect;
rewrite ^(/qingdao)$ $1/ redirect;
rewrite ^(/qinghaitourism)$ $1/ redirect;
rewrite ^(/renyike)$ $1/ redirect;
rewrite ^(/scanner)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/securityh5)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/smauth)$ $1/ redirect;
rewrite ^(/smsecret)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/threedsimulation)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/uas)$ $1/ redirect;
rewrite ^(/uasms)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
rewrite ^(/uavmsmanager)$ $1/ redirect;
rewrite ^(/secenter)$ $1/ redirect;
spec:
rules:
- host: fake-domain.szgz.io
http:
paths:
- path: /?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /armypeople/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /blockchain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-blockchain
servicePort: 9528
- path: /classification/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-classification
servicePort: 9528
- path: /cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /dikongzhixingh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dikongzhixingh5
servicePort: 9528
- path: /dispatchh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dispatchh5
servicePort: 9528
- path: /emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /eventsh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-eventsh5
servicePort: 9528
- path: /hljtt/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hljtt
servicePort: 9528
- path: /hyper/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hyperspectral
servicePort: 9528
- path: /jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /mianyangbackend/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mianyangbackend
servicePort: 9528
- path: /multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-multiterminal
servicePort: 9528
- path: /mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /pilot2cloud/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-pilot2-to-cloud
servicePort: 9528
- path: /qingdao/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qingdao
servicePort: 9528
- path: /qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /renyike/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-renyike
servicePort: 9528
- path: /scanner/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-scanner
servicePort: 9528
- path: /security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-security
servicePort: 9528
- path: /securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /smauth/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smauth
servicePort: 9528
- path: /smsecret/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smsecret
servicePort: 9528
- path: /splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-threedsimulation
servicePort: 9528
- path: /traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-traffic
servicePort: 9528
- path: /uas/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uas
servicePort: 9528
- path: /uasms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uasms
servicePort: 9528
- path: /visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
- path: /uavmsmanager/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-manager
servicePort: 9528
- path: /secenter/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-security-center
servicePort: 9528
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: szgz
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: cmii-admin-data.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-data
servicePort: 8080
- host: cmii-admin-gateway.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- host: cmii-admin-user.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-user
servicePort: 8080
- host: cmii-app-release.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-app-release
servicePort: 8080
- host: cmii-open-gateway.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- host: cmii-sky-converge.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-sky-converge
servicePort: 8080
- host: cmii-suav-supervision.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-supervision
servicePort: 8080
- host: cmii-uas-gateway.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- host: cmii-uas-lifecycle.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-lifecycle
servicePort: 8080
- host: cmii-uav-advanced5g.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-advanced5g
servicePort: 8080
- host: cmii-uav-airspace.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-airspace
servicePort: 8080
- host: cmii-uav-alarm.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-alarm
servicePort: 8080
- host: cmii-uav-autowaypoint.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-autowaypoint
servicePort: 8080
- host: cmii-uav-brain.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-brain
servicePort: 8080
- host: cmii-uav-bridge.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-bridge
servicePort: 8080
- host: cmii-uav-cloud-live.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cloud-live
servicePort: 8080
- host: cmii-uav-clusters.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-clusters
servicePort: 8080
- host: cmii-uav-cms.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cms
servicePort: 8080
- host: cmii-uav-data-post-process.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-data-post-process
servicePort: 8080
- host: cmii-uav-depotautoreturn.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-depotautoreturn
servicePort: 8080
- host: cmii-uav-developer.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-developer
servicePort: 8080
- host: cmii-uav-device.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-device
servicePort: 8080
- host: cmii-uav-emergency.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-emergency
servicePort: 8080
- host: cmii-uav-fwdd.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-fwdd
servicePort: 8080
- host: cmii-uav-gateway.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- host: cmii-uav-gis-server.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gis-server
servicePort: 8080
- host: cmii-uav-grid-datasource.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-datasource
servicePort: 8080
- host: cmii-uav-grid-engine.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-engine
servicePort: 8080
- host: cmii-uav-grid-manage.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-manage
servicePort: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-industrial-portfolio
servicePort: 8080
- host: cmii-uav-integration.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-integration
servicePort: 8080
- host: cmii-uav-iot-dispatcher.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-iot-dispatcher
servicePort: 8080
- host: cmii-uav-kpi-monitor.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-kpi-monitor
servicePort: 8080
- host: cmii-uav-logger.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-logger
servicePort: 8080
- host: cmii-uav-material-warehouse.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-material-warehouse
servicePort: 8080
- host: cmii-uav-mission.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mission
servicePort: 8080
- host: cmii-uav-mqtthandler.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mqtthandler
servicePort: 8080
- host: cmii-uav-multilink.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-multilink
servicePort: 8080
- host: cmii-uav-notice.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-notice
servicePort: 8080
- host: cmii-uav-oauth.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-oauth
servicePort: 8080
- host: cmii-uav-process.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-sense-adapter.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sense-adapter
servicePort: 8080
- host: cmii-uav-surveillance.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-surveillance
servicePort: 8080
- host: cmii-uav-sync.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sync
servicePort: 8080
- host: cmii-uav-threedsimulation.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-threedsimulation
servicePort: 8080
- host: cmii-uav-tower.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tower
servicePort: 8080
- host: cmii-uav-user.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-user
servicePort: 8080
- host: cmii-uav-waypoint.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-waypoint
servicePort: 8080
- host: cmii-uavms-security-center.uavcloud-szgz.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-security-center
servicePort: 8080
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: szgz
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection "upgrade";
spec:
rules:
- host: fake-domain.szgz.io
http:
paths:
- path: /oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
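
Because rewrite-target is /$1, the /oms/api and /open/api prefixes are stripped before proxying. A curl sketch (assumes <ingress-ip> reaches the nginx ingress controller; the Host header stands in for DNS):

curl -s -H 'Host: fake-domain.szgz.io' http://<ingress-ip>/api/        # -> cmii-uav-gateway, path /
curl -s -H 'Host: fake-domain.szgz.io' http://<ingress-ip>/oms/api/    # -> cmii-admin-gateway, path /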

View File

@@ -0,0 +1,77 @@
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: szgz
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
nodePort: 37017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: szgz
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: helm-mongo
image: 172.31.2.7:8033/admin/cmii/mongo:5.0
resources: {}
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
emptyDir: {}
---
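
A connectivity check for the single-replica Mongo above (a sketch; mongosh ships in the mongo:5.0 image and the credentials come from the env block):

kubectl -n szgz exec -it helm-mongo-0 -- mongosh -u cmlc -p 'REdPza8#oVlt' --authenticationDatabase admin --eval 'db.adminCommand({ ping: 1 })'
# or from outside the cluster via the NodePort: mongosh "mongodb://cmlc:<password>@<node-ip>:37017/admin"

Note the data volume is an emptyDir, so contents do not survive rescheduling; the helm-mongo PVC declared later in this commit is not wired in here.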

View File

@@ -0,0 +1,410 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
annotations: {}
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
    innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all on *.* to zyly_qc@'%';
create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
grant all on *.* to k8s_admin@'%';
create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all on *.* to audit_dba@'%';
create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
create user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION CLIENT on *.* to monitor@'%';
flush privileges;
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: szgz
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: szgz
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: szgz
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: szgz
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: szgz
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: szgz
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: szgz
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: {}
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: 172.31.2.7:8033/admin/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: 172.31.2.7:8033/admin/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: {}
requests: {}
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/szgz/
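
A login sketch against the NodePort service above (assumes a mysql client on a host that reaches <node-ip>; the k8s_admin account is created by helm-mysql-init-scripts on first start):

mysql -h <node-ip> -P 33306 -u k8s_admin -p'fP#UaH6qQ3)8' -e 'SELECT VERSION();'
# in-cluster alternative, using the root password injected from the secret:
kubectl -n szgz exec -it helm-mysql-0 -- bash -c 'mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -e "SHOW DATABASES;"'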

View File

@@ -0,0 +1,130 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: szgz
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: szgz
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38848
- port: 9848
name: server12
targetPort: 9848
- port: 9849
name: server23
targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: szgz
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 6.2.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: nacos-server
image: 172.31.2.7:8033/admin/cmii/nacos-server:v2.1.2-slim
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
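
Once the pod is Ready the console answers on the NodePort (a sketch; /nacos is the default context path, and the console health endpoint below is the stock Nacos probe — verify it against the 2.1.2 build in use):

curl -s http://<node-ip>:38848/nacos/v1/console/health/readiness
# web console: http://<node-ip>:38848/nacos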

View File

@@ -0,0 +1,38 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: test-pod
image: 172.31.2.7:8033/admin/cmii/busybox:latest
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
        claimName: test-claim # must match the PVC name above
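
To exercise the claim/pod pair above (a sketch; assumes both objects are saved as nfs-test.yaml and the provisioner from the next file is already running):

kubectl apply -f nfs-test.yaml
kubectl get pvc test-claim   # should reach Bound
kubectl get pod test-pod     # should reach Completed
# on the NFS server, the provisioned subdirectory should now contain the marker file
ls /var/lib/docker/nfs_data/*test-claim*/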

View File

@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # adjust to your environment; the same applies below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # must match the namespace used in the RBAC manifests above
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 172.31.2.7:8033/admin/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 172.31.2.7
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 172.31.2.7
path: /var/lib/docker/nfs_data
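
If test-claim stays Pending, check the StorageClass name and the provisioner logs first (a sketch):

kubectl get storageclass nfs-prod-distribute
kubectl -n kube-system logs deploy/nfs-client-provisioner --tail=50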

View File

@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: szgz
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: szgz
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: szgz
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: szgz
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
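
With the StorageClass in place, all four claims above should bind (a sketch):

kubectl -n szgz get pvc
# expect nfs-backend-log-pvc, helm-emqxs, helm-mongo and helm-rabbitmq in Bound state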

View File

@@ -0,0 +1,329 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
      targetPort: dashboard
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: szgz
publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 36675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: szgz
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: szgz
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: szgz
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: {}
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: 172.31.2.7:8033/admin/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: {}
requests: {}
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: 172.31.2.7:8033/admin/cmii/rabbitmq:3.11.26-debian-11-r2
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: {}
requests: {}
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
emptyDir: {}
# persistentVolumeClaim:
# claimName: helm-rabbitmq
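
A health sketch for the broker above (<node-ip> is a placeholder; the admin credentials are the defaults from rabbitmq.conf and the helm-rabbitmq secret):

kubectl -n szgz exec helm-rabbitmq-0 -- rabbitmq-diagnostics -q cluster_status
curl -s -u admin:'nYcRN91r._hj' http://<node-ip>:36675/api/overview | head -c 200
# management UI: http://<node-ip>:36675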

View File

@@ -0,0 +1,591 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
    # Set cgroup memory swappiness to 0 (the file takes a number 0-100; may be read-only when not running as root)
    echo 0 > /sys/fs/cgroup/memory/memory.swappiness 2>/dev/null || true
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
ARGS+=("--memory-swappiness" "0") # 添加这一行
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: szgz
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: szgz
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: szgz
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: szgz
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: {}
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 172.31.2.7:8033/admin/cmii/redis:6.2.14-debian-11-r1
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "true"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: MEMORY-SWAPPINESS
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: tmp
emptyDir: {}
- name: redis-data
emptyDir: {}
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: szgz
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 0
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: szgz
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: szgz
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 172.31.2.7:8033/admin/cmii/redis:6.2.14-debian-11-r1
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.szgz.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: redis-data
emptyDir: {}
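
A quick verification sketch (assumed operator workflow, not part of the manifests above): with the replica StatefulSet scaled to 0, only the master serves traffic, and its replication state can be checked in place.

# Decode the password from the helm-redis secret and query replication state:
kubectl -n szgz exec -it helm-redis-master-0 -- redis-cli \
  -a "$(kubectl -n szgz get secret helm-redis -o jsonpath='{.data.redis-password}' | base64 -d)" \
  info replication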

View File

@@ -0,0 +1,496 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: szgz
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 31935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://172.31.2.7:8088;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
nodePort: 31935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30080
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: 172.31.2.7:8033/admin/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 31935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 172.31.2.7
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: szgz/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: szgz/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: 172.31.2.7:8033/admin/cmii/cmii-srs-oss-adaptor:v5.2.0-arm
env:
- name: OSS_ENDPOINT
value: 'http://172.31.2.9:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: szgz/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: 172.31.2.7:8033/admin/cmii/cmii-live-operator:v5.2.0-arm
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: szgz
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 6.2.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: nacos
password: KingKong@95461234
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 6.2.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 6.2.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://172.31.2.7:31935'
rtsp: 'rtsp://172.31.2.7:30554'
srt: 'srt://172.31.2.7:30556'
flv: 'http://172.31.2.7:30500'
hls: 'http://172.31.2.7:30500'
rtc: 'webrtc://172.31.2.7:30080'
replay: 'https://172.31.2.7:30333'
minio:
endpoint: http://172.31.2.9:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls
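
A hedged smoke test for the SRS config above (assumes ffmpeg is available and an H.264/AAC source file; the app/stream names are illustrative): publish through the RTMP listener exposed on nodePort 31935, then play back over HLS.

# Push a test stream to SRS:
ffmpeg -re -i input.mp4 -c copy -f flv rtmp://172.31.2.7:31935/live/livestream
# Per hls_entry_prefix and hls_m3u8_file ([app]/[stream].m3u8), the playlist is served at:
#   http://172.31.2.7:8088/live/livestream.m3u8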

View File

@@ -0,0 +1,24 @@
#!/bin/bash
kubectl scale deployment --all --replicas=0 -n uavcloud-dev
kubectl scale statefulset --all --replicas=0 -n uavcloud-dev
kubectl scale deployment --all --replicas=0 -n uavcloud-devflight
kubectl scale statefulset --all --replicas=0 -n uavcloud-devflight
kubectl scale deployment --all --replicas=0 -n uavcloud-devoperation
kubectl scale statefulset --all --replicas=0 -n uavcloud-devoperation
# Pick exactly one target namespace; later exports override earlier ones.
# export name_space=uavcloud-devflight
# export name_space=uavcloud-devoperation
export name_space=uavcloud-dev
kubectl delete pods -n $name_space --field-selector status.phase!=Running --force
kubectl get pods -n $name_space -o json | jq -r '.items[] | select(.status.containerStatuses[0].ready == false) | .metadata.name' | xargs -r kubectl delete pod -n $name_space --force
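# A hedged variant of the line above: treat a pod as not-ready if ANY of its
# containers is not ready (the original only inspects containerStatuses[0]).
kubectl get pods -n "$name_space" -o json \
  | jq -r '.items[] | select(any(.status.containerStatuses[]?; .ready == false)) | .metadata.name' \
  | xargs -r kubectl delete pod -n "$name_space" --force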
# Reference note (not a command): a container's JSON log path on the Docker host.
# /var/lib/docker/containers/8380daef87f894cdc0ba98f0b528c39945f6d5c603129fe68fd7f00132d89001/8380daef87f894cdc0ba98f0b528c39945f6d5c603129fe68fd7f00132d89001-json.log

View File

@@ -0,0 +1,9 @@
kind: Secret
apiVersion: v1
metadata:
name: harborsecret
namespace: uavcloud-demo
data:
.dockerconfigjson: >-
ewoJImF1dGhzIjogewoJCSJoYXJib3ItcWEuc3JlLmNkY3l5LmNuIjogewoJCQkiYXV0aCI6ICJjbUZrTURKZlpISnZibVU2UkhKdmJtVkFNVEl6TkE9PSIKCQl9LAogICAgICAgICAgICAgICAgImhhcmJvci5jZGN5eS5jb20uY24iOiB7CgkJCSJhdXRoIjogImNtRmtNREpmWkhKdmJtVTZSSEp2Ym1WQU1USXpOQT09IgoJCX0KCX0sCgkiSHR0cEhlYWRlcnMiOiB7CgkJIlVzZXItQWdlbnQiOiAiRG9ja2VyLUNsaWVudC8xOS4wMy45IChsaW51eCkiCgl9Cn0=
type: kubernetes.io/dockerconfigjson
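
The .dockerconfigjson value is a base64-encoded Docker auth config. A regeneration sketch (credentials deliberately elided; substitute real ones):

kubectl -n uavcloud-demo create secret docker-registry harborsecret \
  --docker-server=harbor.cdcyy.com.cn \
  --docker-username='<username>' \
  --docker-password='<password>' \
  --dry-run=client -o yaml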

View File

@@ -0,0 +1,585 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis-uas
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis-uas
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-uas-configuration
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-uas-health
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-uas-scripts
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-uas-headless
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-demo
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-uas-master
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-demo
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-uas-replicas
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-uas-master
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-demo
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-uas-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: { }
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis-uas
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: harbor.cdcyy.com.cn/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis-uas
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 4Gi
requests:
cpu: "2"
memory: 4Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-uas-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-uas-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-uas-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: tmp
emptyDir: { }
- name: redis-data
emptyDir: { }
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-uas-replicas
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/component: replica
serviceName: helm-redis-uas-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis-uas
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: harbor.cdcyy.com.cn/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-uas-master-0.helm-redis-uas-headless.uavcloud-demo.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis-uas
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis-uas
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 4Gi
requests:
cpu: "2"
memory: 4Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-uas-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-uas-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-uas-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: redis-data
emptyDir: { }
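
A retrieval sketch (assumed operator step): the runtime password lives in the helm-redis-uas secret and can be decoded for client use.

kubectl -n uavcloud-demo get secret helm-redis-uas \
  -o jsonpath='{.data.redis-password}' | base64 -d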

View File

@@ -0,0 +1,180 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-platform-mianyangh5
namespace: uavcloud-demo
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-mianyangh5
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: 6.2.0
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: cmii-uav-platform-mianyangh5
template:
metadata:
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-mianyangh5
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-platform-mianyangh5
image: harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mianyangh5:6.2.0-demo
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: uavcloud-demo
- name: APPLICATION_NAME
value: cmii-uav-platform-mianyangh5
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-mianyangh5
items:
- key: ingress-config.js
path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-mianyangh5
namespace: uavcloud-demo
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-mianyangh5
octopus.control: frontend-app-wdd
app.kubernetes.io/version: 6.2.0
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-mianyangh5
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-platform-mianyangbackend
namespace: uavcloud-demo
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-mianyangbackend
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: 6.2.0
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: cmii-uav-platform-mianyangbackend
template:
metadata:
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-mianyangbackend
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-platform-mianyangbackend
image: harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mianyangbackend:6.2.0-demo
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: uavcloud-demo
- name: APPLICATION_NAME
value: cmii-uav-platform-mianyangbackend
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-mianyangbackend
items:
- key: ingress-config.js
path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-mianyangbackend
namespace: uavcloud-demo
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-mianyangbackend
octopus.control: frontend-app-wdd
app.kubernetes.io/version: 6.2.0
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-mianyangbackend
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528
---
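
A hedged post-apply check for the two Deployments above:

kubectl -n uavcloud-demo rollout status deployment/cmii-uav-platform-mianyangh5
kubectl -n uavcloud-demo rollout status deployment/cmii-uav-platform-mianyangbackend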

File diff suppressed because it is too large.

View File

@@ -0,0 +1,574 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-secenter
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "secenter",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dikongzhixingh5
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "dikongzhixingh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-blockchain
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "blockchain",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "hyper",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-scanner
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "scanner",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: uavcloud-demo
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "www.demo.uavcmlc.com",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
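
A generation sketch (assumed helper, not part of this repo): the ConfigMaps above differ only in ApplicationShortName and AppClientId, so tenants whose AppClientId is "empty" could be emitted from a list instead of maintained by hand; tenants with a real client id still need an explicit mapping.

for app in oms uas open qingdao; do
  kubectl -n uavcloud-demo create configmap "tenant-prefix-${app}" \
    --from-literal=ingress-config.js="var __GlobalIngressConfig = {
      TenantEnvironment: \"\",
      CloudHOST: \"www.demo.uavcmlc.com\",
      ApplicationShortName: \"${app}\",
      AppClientId: \"empty\"
    }" --dry-run=client -o yaml
done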

View File

@@ -0,0 +1,309 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 39999
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: kubernetes-dashboard
image: harbor.cdcyy.com.cn/cmii/dashboard:v2.0.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: harbor.cdcyy.com.cn/cmii/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
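
A login sketch (assumes Kubernetes >= 1.24, where kubectl can mint ServiceAccount tokens on demand): the dashboard is published on nodePort 39999 over HTTPS, and the admin-user token signs in with cluster-admin rights.

kubectl -n kube-system create token admin-user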

View File

@@ -0,0 +1,276 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: uavcloud-demo
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: uavcloud-demo
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
data:
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
  EMQX_CLUSTER__K8S__NAMESPACE: "uavcloud-demo"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: uavcloud-demo
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
data:
emqx_auth_mnesia.conf: |-
auth.mnesia.password_hash = sha256
    # clientid authentication data
auth.client.1.clientid = admin
auth.client.1.password = odD8#Cr628
auth.client.2.clientid = cmlc
auth.client.2.password = odD8#Cr628
    ## username authentication data
auth.user.1.username = admin
auth.user.1.password = odD8#Cr628
auth.user.2.username = cmlc
auth.user.2.password = odD8#Cr628
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
    {emqx_auth_mnesia,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: uavcloud-demo
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
affinity: {}
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: harbor.cdcyy.com.cn/cmii/emqx:5.5.1
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: {}
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf"
subPath: emqx_auth_mnesia.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_mnesia.conf
path: emqx_auth_mnesia.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: uavcloud-demo
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: uavcloud-demo
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: uavcloud-demo
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: uavcloud-demo
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: uavcloud-demo
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
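
A hedged connectivity check (assumes the mosquitto clients are installed, a real node IP is substituted, and the broker honors the mnesia auth config above): authenticate against the defined users via the mqtt nodePort 31883.

mosquitto_pub -h <node-ip> -p 31883 -u admin -P 'odD8#Cr628' -t 'admin/test' -m 'hello'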

File diff suppressed because it is too large.

View File

@@ -0,0 +1,720 @@
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: uavcloud-demo
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/supervisionh5)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/blockchain)$ $1/ redirect;
rewrite ^(/classification)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/dikongzhixingh5)$ $1/ redirect;
rewrite ^(/dispatchh5)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/eventsh5)$ $1/ redirect;
rewrite ^(/hljtt)$ $1/ redirect;
rewrite ^(/hyper)$ $1/ redirect;
rewrite ^(/jiangsuwenlv)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/mianyangbackend)$ $1/ redirect;
rewrite ^(/multiterminal)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/pilot2cloud)$ $1/ redirect;
rewrite ^(/qingdao)$ $1/ redirect;
rewrite ^(/qinghaitourism)$ $1/ redirect;
rewrite ^(/scanner)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/securityh5)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/smauth)$ $1/ redirect;
rewrite ^(/smsecret)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/threedsimulation)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/uas)$ $1/ redirect;
rewrite ^(/uasms)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
rewrite ^(/secenter)$ $1/ redirect;
spec:
rules:
- host: www.demo.uavcmlc.com
http:
paths:
- path: /?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /armypeople/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /blockchain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-blockchain
servicePort: 9528
- path: /classification/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-classification
servicePort: 9528
- path: /cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /dikongzhixingh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dikongzhixingh5
servicePort: 9528
- path: /dispatchh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dispatchh5
servicePort: 9528
- path: /emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /eventsh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-eventsh5
servicePort: 9528
- path: /hljtt/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hljtt
servicePort: 9528
- path: /hyper/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hyperspectral
servicePort: 9528
- path: /jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /mianyangbackend/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mianyangbackend
servicePort: 9528
- path: /multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-multiterminal
servicePort: 9528
- path: /mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /pilot2cloud/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-pilot2-to-cloud
servicePort: 9528
- path: /qingdao/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qingdao
servicePort: 9528
- path: /qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /scanner/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-scanner
servicePort: 9528
- path: /security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-security
servicePort: 9528
- path: /securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /smauth/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smauth
servicePort: 9528
- path: /smsecret/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smsecret
servicePort: 9528
- path: /splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-threedsimulation
servicePort: 9528
- path: /traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-traffic
servicePort: 9528
- path: /uas/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uas
servicePort: 9528
- path: /uasms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uasms
servicePort: 9528
- path: /visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
- path: /secenter/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-security-center
servicePort: 9528
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: uavcloud-demo
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: cmii-admin-data.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-data
servicePort: 8080
- host: cmii-admin-gateway.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- host: cmii-admin-user.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-user
servicePort: 8080
- host: cmii-app-release.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-app-release
servicePort: 8080
- host: cmii-open-gateway.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- host: cmii-suav-supervision.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-supervision
servicePort: 8080
- host: cmii-uas-gateway.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- host: cmii-uas-lifecycle.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-lifecycle
servicePort: 8080
- host: cmii-uav-advanced5g.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-advanced5g
servicePort: 8080
- host: cmii-uav-airspace.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-airspace
servicePort: 8080
- host: cmii-uav-alarm.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-alarm
servicePort: 8080
- host: cmii-uav-autowaypoint.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-autowaypoint
servicePort: 8080
- host: cmii-uav-brain.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-brain
servicePort: 8080
- host: cmii-uav-bridge.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-bridge
servicePort: 8080
- host: cmii-uav-cloud-live.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cloud-live
servicePort: 8080
- host: cmii-uav-clusters.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-clusters
servicePort: 8080
- host: cmii-uav-cms.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cms
servicePort: 8080
- host: cmii-uav-data-post-process.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-data-post-process
servicePort: 8080
- host: cmii-uav-depotautoreturn.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-depotautoreturn
servicePort: 8080
- host: cmii-uav-developer.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-developer
servicePort: 8080
- host: cmii-uav-device.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-device
servicePort: 8080
- host: cmii-uav-emergency.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-emergency
servicePort: 8080
- host: cmii-uav-fwdd.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-fwdd
servicePort: 8080
- host: cmii-uav-gateway.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- host: cmii-uav-gis-server.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gis-server
servicePort: 8080
- host: cmii-uav-grid-datasource.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-datasource
servicePort: 8080
- host: cmii-uav-grid-engine.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-engine
servicePort: 8080
- host: cmii-uav-grid-manage.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-manage
servicePort: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-industrial-portfolio
servicePort: 8080
- host: cmii-uav-integration.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-integration
servicePort: 8080
- host: cmii-uav-iot-dispatcher.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-iot-dispatcher
servicePort: 8080
- host: cmii-uav-kpi-monitor.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-kpi-monitor
servicePort: 8080
- host: cmii-uav-logger.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-logger
servicePort: 8080
- host: cmii-uav-material-warehouse.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-material-warehouse
servicePort: 8080
- host: cmii-uav-mission.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mission
servicePort: 8080
- host: cmii-uav-mqtthandler.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mqtthandler
servicePort: 8080
- host: cmii-uav-multilink.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-multilink
servicePort: 8080
- host: cmii-uav-notice.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-notice
servicePort: 8080
- host: cmii-uav-oauth.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-oauth
servicePort: 8080
- host: cmii-uav-process.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-sense-adapter.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sense-adapter
servicePort: 8080
- host: cmii-uav-surveillance.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-surveillance
servicePort: 8080
- host: cmii-uav-sync.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sync
servicePort: 8080
- host: cmii-uav-threedsimulation.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-threedsimulation
servicePort: 8080
- host: cmii-uav-tower.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tower
servicePort: 8080
- host: cmii-uav-user.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-user
servicePort: 8080
- host: cmii-uav-waypoint.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-waypoint
servicePort: 8080
- host: cmii-uavms-security-center.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-security-center
servicePort: 8080
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: uavcloud-demo
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
spec:
rules:
- host: www.demo.uavcmlc.com
http:
paths:
- path: /oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- path: /uas/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- path: /iam/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
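Since this ingress sets rewrite-target: /$1 and each rule captures the remainder in (.*), the routing prefix is stripped before the request reaches the backend: /api/<rest> arrives at cmii-uav-gateway:8080 as /<rest>. A hedged check, with <ingress-ip> as a placeholder and /actuator/health as a hypothetical backend path:

# /api/actuator/health is forwarded to cmii-uav-gateway as /actuator/health
curl -H 'Host: www.demo.uavcmlc.com' 'http://<ingress-ip>/api/actuator/health'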

View File

@@ -0,0 +1,78 @@
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: uavcloud-demo
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
nodePort: 37017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: uavcloud-demo
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: helm-mongo
image: harbor.cdcyy.com.cn/cmii/mongo:5.0
resources: {}
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
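A minimal connectivity check for the MongoDB instance above, assuming the mongo:5.0 image ships mongosh (it also carries the legacy mongo shell), with credentials taken from the env vars in the StatefulSet:

kubectl -n uavcloud-demo exec -it helm-mongo-0 -- \
  mongosh -u cmlc -p 'REdPza8#oVlt' --authenticationDatabase admin --eval 'db.adminCommand({ ping: 1 })'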

View File

@@ -0,0 +1,410 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
annotations: {}
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
    innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all on *.* to zyly_qc@'%';
create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
grant all on *.* to k8s_admin@'%';
create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all on *.* to audit_dba@'%';
create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
create user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION CLIENT on *.* to monitor@'%';
flush privileges;
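Note that scripts mounted under /docker-entrypoint-initdb.d run only when the data directory is initialized for the first time; on an existing volume the users above are not re-created. A sketch to confirm the accounts, using the k8s_admin credentials from the script:

kubectl -n uavcloud-demo exec -it helm-mysql-0 -- \
  mysql -uk8s_admin -p'fP#UaH6qQ3)8' -e 'SELECT user, host FROM mysql.user;'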
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: uavcloud-demo
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: uavcloud-demo
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: uavcloud-demo
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: uavcloud-demo
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: uavcloud-demo
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: uavcloud-demo
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: {}
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: harbor.cdcyy.com.cn/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: harbor.cdcyy.com.cn/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: {}
requests: {}
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/uavcloud-demo/
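Because mysql-data is a hostPath volume, the data lives on a single node, and the nodeSelector above (mysql-deploy: "true") pins the pod there. The node must be labeled before the StatefulSet can schedule; <node-name> is a placeholder:

kubectl label node <node-name> mysql-deploy=true
kubectl -n uavcloud-demo get pod -l cmii.app=mysql -o wide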

View File

@@ -0,0 +1,130 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: uavcloud-demo
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "192.168.35.205"
mysql.port: "13306"
mysql.user: "k8s_admin"
mysql.password: "Uc@bkTCPuD4g"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: uavcloud-demo
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38848
- port: 9848
name: server12
targetPort: 9848
- port: 9849
name: server23
targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: uavcloud-demo
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 6.2.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: nacos-server
image: harbor.cdcyy.com.cn/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
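A minimal reachability check for the standalone Nacos above, assuming the console is served under the default /nacos context path; <node-ip> is a placeholder:

curl -fsS 'http://<node-ip>:38848/nacos/' >/dev/null && echo 'nacos console reachable'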

View File

@@ -0,0 +1,38 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in the StorageClass manifest (nfs-StorageClass.yaml)
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: test-pod
image: harbor.cdcyy.com.cn/cmii/busybox:latest
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
        claimName: test-claim # must match the PVC name above
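To verify dynamic provisioning end to end: the claim should bind through nfs-prod-distribute and the pod should exit 0 after touching the marker file. The subdirectory on the NFS server follows the provisioner's <namespace>-<pvc>-<pv> naming convention, hence the glob below:

kubectl get pvc test-claim   # expect STATUS Bound
kubectl get pod test-pod     # expect Completed
# on the NFS server (192.168.118.15):
ls /var/lib/docker/nfs_data/*test-claim*/NFS-CREATE-SUCCESS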

View File

@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # set to the namespace where the provisioner is deployed; the manifests below use the same value
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment below
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # must match the namespace used by the ServiceAccount and RBAC objects above
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: harbor.cdcyy.com.cn/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 192.168.118.15
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 192.168.118.15
path: /var/lib/docker/nfs_data

View File

@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: uavcloud-demo
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: uavcloud-demo
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: uavcloud-demo
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: uavcloud-demo
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
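Once the provisioner is running, all four claims above should report Bound:

kubectl -n uavcloud-demo get pvc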

View File

@@ -0,0 +1,328 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = sMSk81Plb1
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
      targetPort: dashboard
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: uavcloud-demo
publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 36675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: uavcloud-demo
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: uavcloud-demo
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: {}
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: harbor.cdcyy.com.cn/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: {}
requests: {}
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: harbor.cdcyy.com.cn/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: {}
requests: {}
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq
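A quick health check for the broker above; the dashboard credentials come from the rabbitmq.conf ConfigMap (admin / sMSk81Plb1), and <node-ip> is a placeholder:

kubectl -n uavcloud-demo exec helm-rabbitmq-0 -- rabbitmq-diagnostics -q cluster_status
# dashboard: http://<node-ip>:36675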

View File

@@ -0,0 +1,585 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis-uas
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis-uas
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-uas-configuration
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-uas-health
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-uas-scripts
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-uas-headless
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-demo
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-uas-master
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-demo
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-uas-replicas
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-uas-master
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-demo
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-uas-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: { }
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis-uas
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: harbor.cdcyy.com.cn/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis-uas
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 4Gi
requests:
cpu: "2"
memory: 4Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-uas-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-uas-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-uas-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: tmp
emptyDir: { }
- name: redis-data
emptyDir: { }
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-uas-replicas
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 0
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/component: replica
serviceName: helm-redis-uas-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis-uas
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: harbor.cdcyy.com.cn/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-uas-master-0.helm-redis-uas-headless.uavcloud-demo.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis-uas
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis-uas
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 4Gi
requests:
cpu: "2"
memory: 4Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-uas-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-uas-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-uas-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: redis-data
emptyDir: { }
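Note that redis-data is an emptyDir, so the AOF enabled in redis.conf does not survive a pod reschedule. A minimal auth check — the redis-password Secret value TWNhY2hlQDQ1MjI= decodes to Mcache@4522:

kubectl -n uavcloud-demo exec -it helm-redis-uas-master-0 -- redis-cli -a 'Mcache@4522' ping   # expect PONG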

View File

@@ -0,0 +1,585 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-demo
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-demo
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-demo
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: {}
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: harbor.cdcyy.com.cn/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: tmp
emptyDir: {}
- name: redis-data
emptyDir: {}
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: uavcloud-demo
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: uavcloud-demo
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: harbor.cdcyy.com.cn/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.uavcloud-demo.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: redis-data
emptyDir: {}
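
A quick post-deploy smoke test for the Redis master/replica pair above — a minimal sketch assuming a working kubectl context for the uavcloud-demo namespace; only the resource and secret names are taken from the manifests, the rest is illustrative:

kubectl -n uavcloud-demo get statefulset helm-redis-master helm-redis-replicas
# read the password from the helm-redis secret and ping the master
kubectl -n uavcloud-demo exec -it helm-redis-master-0 -- \
  redis-cli -a "$(kubectl -n uavcloud-demo get secret helm-redis -o jsonpath='{.data.redis-password}' | base64 -d)" ping
# expected output: PONG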

View File

@@ -0,0 +1,496 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: uavcloud-demo
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 31935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://www.demo.uavcmlc.com;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: uavcloud-demo
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
nodePort: 31935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30080
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: uavcloud-demo
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: uavcloud-demo
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: uavcloud-demo
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: harbor.cdcyy.com.cn/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 31935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: www.demo.uavcmlc.com
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: uavcloud-demo/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: uavcloud-demo/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
env:
- name: OSS_ENDPOINT
value: 'http://192.168.118.15:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: uavcloud-demo/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: uavcloud-demo
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: uavcloud-demo
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: uavcloud-demo
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: uavcloud-demo
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 6.2.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: nacos
password: KingKong@95461234
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 6.2.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 6.2.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://www.demo.uavcmlc.com:31935'
rtsp: 'rtsp://www.demo.uavcmlc.com:30554'
srt: 'srt://www.demo.uavcmlc.com:30556'
flv: 'http://www.demo.uavcmlc.com:30500'
hls: 'http://www.demo.uavcmlc.com:30500'
rtc: 'webrtc://www.demo.uavcmlc.com:30080'
replay: 'https://www.demo.uavcmlc.com:30333'
minio:
endpoint: http://192.168.118.15:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls
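
One hedged way to sanity-check the SRS stack above end to end is to push a test stream over the RTMP NodePort and fetch the HLS playlist; ffmpeg and sample.mp4 are assumptions, the host comes from the manifests and port 30500 follows the flv/hls entries in the op ConfigMap:

ffmpeg -re -i sample.mp4 -c copy -f flv rtmp://www.demo.uavcmlc.com:31935/live/test
curl -I http://www.demo.uavcmlc.com:30500/live/test.m3u8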

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,276 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: jxyd
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: jxyd
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
data:
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
  EMQX_CLUSTER__K8S__NAMESPACE: "jxyd"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: jxyd
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
data:
emqx_auth_mnesia.conf: |-
auth.mnesia.password_hash = sha256
      # clientid auth entries
auth.client.1.clientid = admin
auth.client.1.password = odD8#Ve7.B
auth.client.2.clientid = cmlc
auth.client.2.password = odD8#Ve7.B
      ## username auth entries
auth.user.1.username = admin
auth.user.1.password = odD8#Ve7.B
auth.user.2.username = cmlc
auth.user.2.password = odD8#Ve7.B
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
    {emqx_auth_mnesia,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: jxyd
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
affinity: {}
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: 10.20.1.130:8033/cmii/emqx:5.5.1
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: {}
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf"
subPath: emqx_auth_mnesia.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_mnesia.conf
path: emqx_auth_mnesia.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: jxyd
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: jxyd
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: jxyd
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: jxyd
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: jxyd
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.2.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
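
A minimal connectivity check against the NodePort service above, assuming mosquitto-clients is installed on a machine that can reach a cluster node (<node-ip> is a placeholder); credentials come from emqx_auth_mnesia.conf:

mosquitto_sub -h <node-ip> -p 31883 -u admin -P 'odD8#Ve7.B' -t 'admin/#' -v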

View File

@@ -0,0 +1,496 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: jxyd
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 31935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://36.138.132.240:30800;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
nodePort: 31935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30557
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30556
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: 10.20.1.130:8033/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 31935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 36.138.132.240
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: jxyd/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: jxyd/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: 10.20.1.130:8033/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
env:
- name: OSS_ENDPOINT
value: 'http://10.20.1.132:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: jxyd/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: 10.20.1.130:8033/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: jxyd
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 6.2.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: nacos
password: KingKong@95461234
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 6.2.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 6.2.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://36.138.132.240:31935'
rtsp: 'rtsp://36.138.132.240:30554'
srt: 'srt://36.138.132.240:30556'
flv: 'http://36.138.132.240:30500'
hls: 'http://36.138.132.240:30500'
rtc: 'webrtc://36.138.132.240:30556'
replay: 'https://36.138.132.240:30333'
minio:
endpoint: http://10.20.1.132:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls
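
The jxyd live manifests above mirror the uavcloud-demo set earlier in this commit; a hedged way to confirm that only namespace, endpoint, and registry values differ (the file names are illustrative, not from this repo):

diff <(sed 's/uavcloud-demo/jxyd/g; s#www.demo.uavcmlc.com#36.138.132.240#g' live-demo.yaml) live-jxyd.yaml
# remaining hunks should be limited to registry and OSS/NodePort values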

Binary file not shown.

View File

@@ -1,15 +1,25 @@
#!/bin/bash
# download the agent
wget http://42.192.52.227:9000/octopus/octopus-agent_linux_amd64
mv ./octopus-agent_linux_amd64 /usr/local/bin/octopus-agent
chmod +x /usr/local/bin/octopus-agent

# download the dependency files
mkdir /root/wdd/
cd /root/wdd

export oss_url_prefix=https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd
# export oss_url_prefix=http://42.192.52.227:9000/octopus
wget ${oss_url_prefix}/docker-amd64-20.10.15.tgz
wget ${oss_url_prefix}/docker-compose-linux-x86_64-v2.18.0
wget ${oss_url_prefix}/harbor-offline-installer-v2.9.0.tgz
wget ${oss_url_prefix}/rke_linux-amd64
wget ${oss_url_prefix}/mysql-8.0.27-linux-glibc2.17-x86_64-minimal.zip
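
The same downloads written as a loop over the file names above — an equivalent sketch, nothing new assumed beyond ${oss_url_prefix}:

for f in docker-amd64-20.10.15.tgz docker-compose-linux-x86_64-v2.18.0 \
  harbor-offline-installer-v2.9.0.tgz rke_linux-amd64 \
  mysql-8.0.27-linux-glibc2.17-x86_64-minimal.zip; do
  wget "${oss_url_prefix}/${f}"
done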

View File

@@ -5,37 +5,45 @@ host_list=(10.129.80.218 10.129.80.245 10.129.80.222 10.129.80.223)
host_list=(10.129.80.217 10.129.80.245 10.129.80.222 10.129.80.223)

for server in "${host_list[@]}";do
  echo "current ip is $server"
  ssh root@${server} "curl 172.24.152.72"
done

# disk
# 10.129.80.245
wget http://42.192.52.227:9000/octopus/octopus-agent_linux_amd64
mv octopus-agent_linux_amd64 /usr/local/bin/octopus-agent
chmod +x /usr/local/bin/octopus-agent
wget http://42.192.52.227:9000/octopus/docker-amd64-20.10.15.tgz

ssh root@${server} "mkdir /root/wdd"
scp /usr/local/bin/octopus-agent root@${server}:/usr/local/bin/octopus-agent
scp /root/wdd/docker-amd64-20.10.15.tgz root@${server}:/root/wdd/docker-amd64-20.10.15.tgz
scp /root/wdd/nfs_client_22.04.4_amd64.tar.gz root@${server}:/root/wdd/
scp /root/wdd/nfs_server_22.04.4_amd64.tar.gz root@${server}:/root/wdd/
scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${server}:/root/wdd/
scp /root/wdd/disk.sh root@${server}:/root/wdd/
ssh root@${server} "bash /root/wdd/disk.sh && lsblk"

ssh root@${server} "chmod +x /usr/local/bin/octopus-agent"
ssh root@${server} "printf 'firewall\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'sysconfig\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'swap\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'selinux\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'sshkey\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'docker\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'dockercompose\n' | octopus-agent --mode=bastion"
printf 'dockerconfig\n' | octopus-agent --mode=bastion
scp /etc/docker/daemon.json root@${server}:/etc/docker/
ssh root@${server} "systemctl restart docker && sleep 3 && docker info"

sed -i '/^$/d' ~/.ssh/*
@@ -43,7 +51,12 @@ sed -i '/^$/d' ~/.ssh/*
chmod +x /usr/local/bin/octopus-agent
printf 'firewall\n' | octopus-agent --mode=bastion
printf 'sysconfig\n' | octopus-agent --mode=bastion
printf 'sshkey\n' | octopus-agent --mode=bastion
printf 'sshconfig\n' | octopus-agent --mode=bastion
printf 'swap\n' | octopus-agent --mode=bastion
printf 'selinux\n' | octopus-agent --mode=bastion
printf 'docker\n' | octopus-agent --mode=bastion
printf 'dockercompose\n' | octopus-agent --mode=bastion
printf 'dockerconfig\n' | octopus-agent --mode=bastion
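
The per-host preparation steps above can be collapsed into one loop; the step names follow the octopus-agent calls already used in this file, nothing else is assumed:

for server in "${host_list[@]}"; do
  for step in firewall sysconfig sshkey sshconfig swap selinux docker dockercompose dockerconfig; do
    ssh root@${server} "printf '${step}\n' | octopus-agent --mode=bastion"
  done
done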

View File

@@ -1,6 +1,6 @@
#!/bin/bash
namespace=jxyd

install_yq() {
  wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/yq_linux_amd64 -O /usr/local/bin/yq
@@ -43,7 +43,7 @@ backup_all_stateful_sets() {
  echo ""
}

install_yq
backup_all_deployment
backup_all_service
backup_all_stateful_sets

View File

@@ -0,0 +1,5 @@
docker run -d --name emqx -p 38085:18083 -p 31883:1883 -p 38083:8083 docker.107421.xyz/emqx/emqx:v4.0.0
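
A quick check that the standalone EMQX container above came up; the ports follow the docker run line, curl is an assumption:

docker logs emqx | tail -n 20
curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:38085   # dashboard should answer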

View File

@@ -1,3 +1,7 @@
#!/bin/bash
# reference doc: https://blog.csdn.net/weixin_43902588/article/details/142279993

export QUAY=/var/lib/docker/quay
mkdir -p $QUAY/postgres
@@ -15,18 +19,30 @@ docker exec -it postgresql /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS pg
docker inspect -f "{{.NetworkSettings.IPAddress}}" postgresql

# memory test only, no need to run this
docker run -it --rm --name redis \
  -p 6379:6379 \
  redis:6.2.14 \
  redis-server --test-memory 4096

mkdir $QUAY/storage
mkdir $QUAY/config

docker run -d --name redis \
  -p 6379:6379 \
  -m 4g \
  redis:6.2.14 \
  --requirepass strongpassword
docker inspect -f "{{.NetworkSettings.IPAddress}}" redis

# run the config service, open http://172.31.2.7:8080/ in a browser and log in as quayconfig/secret
docker run --rm -it --name quay_config -p 8080:8080 quay.io/projectquay/quay:3.11.1 config secret

setfacl -m u:1001:-wx $QUAY/storage
setfacl -m u:1001:-wx $QUAY/config
@@ -38,12 +54,12 @@ docker run -p 8033:8080 \
  -d quay.io/projectquay/quay:3.11.1

# log in to the dashboard on port 8033, create the account and a project:
#   admin / V2ryStr@ngPss
docker login --tls-verify=false -u admin -p V2ryStr@ngPss 172.31.2.7:8033
docker pull 172.31.2.7:8033/admin/cmii
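
After the login succeeds, a hedged end-to-end check is to tag and push any local image into the admin/cmii repository created above (the tag is illustrative):

docker tag busybox:latest 172.31.2.7:8033/admin/cmii:busybox-test
docker push 172.31.2.7:8033/admin/cmii:busybox-test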

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env bash
name_space=uavcloud-demo

delete_all_fronted_cmii_pod(){
@@ -26,4 +26,4 @@ delete_all_backend_cmii_pod(){
done
}

delete_all_backend_cmii_pod

View File

@@ -1,7 +1,7 @@
#!/bin/bash
harbor_host=10.20.1.130:8033
namespace=jxyd

app_name=""
new_tag=""

View File

@@ -1,9 +1,9 @@
#!/bin/bash
minio_local_path=/var/lib/docker/minio-pv/pv1
harbor_host=172.31.2.7:8033/admin
inner_master_ip=172.31.2.7
minio_host_ip=172.31.2.9

install_minio(){
@@ -22,13 +22,13 @@ services:
    ports:
      - "9000:9000"
      - "9001:9001"
    image: '${harbor_host}/cmii/minio:2023.5.4'
    environment:
      - MINIO_ROOT_USER=cmii
      - MINIO_ROOT_PASSWORD=B#923fC7mk
    restart: always
    volumes:
      - ${minio_local_path}:/mnt/data
EOF

echo "start minio container !"
@@ -99,7 +99,7 @@ init_minio(){
}

# install_docker_compose
install_minio
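
A client-side sketch to verify the MinIO container above, assuming the mc binary is available; the endpoint and credentials come from the compose file:

mc alias set local http://172.31.2.9:9000 cmii 'B#923fC7mk'
mc mb --ignore-existing local/live-cluster-hls
mc ls local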

View File

@@ -1,10 +1,8 @@
upstream proxy_server {
    ip_hash;
    server 172.31.2.7:30500;
    server 172.31.2.8:30500;
    server 172.31.2.9:30500;
}

server {
@@ -23,7 +21,7 @@ server {
    proxy_buffering off;
    proxy_buffer_size 4k;
    proxy_buffers 4 12k;
    proxy_set_header Host fake-domain.szgz.io;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
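
After editing the upstream block above, validate and reload nginx in place:

nginx -t && nginx -s reload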

View File

@@ -1,6 +1,10 @@
#!/bin/bash
mkdir /root/.kube
cp kube_config_cluster.yml /root/.kube/config

kubectl apply -f k8s-dashboard.yaml
kubectl delete -f k8s-dashboard.yaml

kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
@@ -24,10 +28,10 @@ kubectl apply -f k8s-emqx.yaml
kubectl delete -f k8s-emqx.yaml

kubectl apply -f k8s-rabbitmq.yaml
kubectl delete -f k8s-rabbitmq.yaml

kubectl apply -f k8s-redis.yaml
kubectl delete -f k8s-redis.yaml

kubectl apply -f k8s-mysql.yaml
kubectl delete -f k8s-mysql.yaml
@@ -42,12 +46,15 @@ kubectl delete -f k8s-nacos.yaml
kubectl apply -f k8s-configmap.yaml
kubectl delete -f k8s-configmap.yaml

vi k8s-ingress.yaml
kubectl apply -f k8s-ingress.yaml
kubectl delete -f k8s-ingress.yaml

vi k8s-frontend.yaml
kubectl apply -f k8s-frontend.yaml
kubectl delete -f k8s-frontend.yaml

vi k8s-backend.yaml
kubectl apply -f k8s-backend.yaml
kubectl delete -f k8s-backend.yaml

View File

@@ -2,11 +2,10 @@
kubectl get nodes --show-labels

kubectl label nodes 172.31.2.7 uavcloud.env=szgz --overwrite
kubectl label nodes 172.31.2.8 uavcloud.env=szgz --overwrite
kubectl label nodes 172.31.2.9 uavcloud.env=szgz --overwrite

# delete the label
kubectl label nodes 192.168.40.193 uavcloud.env-

View File

@@ -53,6 +53,12 @@ clean_rke_cluster() {
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -t raw -F
ip6tables -F && ip6tables -t nat -F && ip6tables -t mangle -F && ip6tables -t raw -F

rke remove --force
printf "y\n" | docker container prune
rke -d up
}

clean_rke_cluster

View File

@@ -0,0 +1,154 @@
#!/usr/bin/env bash
#### modify the content below ####
#### modify the content below ####
#### modify the content below ####
cmlc_app_image_list="szga-0711.txt" # change per release
rancher_image_list="kubernetes-images-2.5.7-1.20.4.txt" # usually no change needed
middleware_image_list="middleware-images.txt" # usually no change needed
#DockerRegisterDomain="20.47.129.116:8033" # adjust to the actual registry
DockerRegisterDomain="172.31.2.7:8033/admin" # adjust to the actual registry
HarborAdminPass=V2ryStr@ngPss # must match the password set in the first script
#### modify the content above ####
#### modify the content above ####
#### modify the content above ####
downloadAllNeededImages() {
while (( $# > 0 )); do
pulled=""
while IFS= read -r i; do
[ -z "${i}" ] && continue
echo "开始下载:${i}"
if docker pull --platform linux/arm64 "${i}" >/dev/null 2>&1; then
echo "Image pull success: ${i}"
# 增加检查,镜像 的架构
docker image inspect ${i} | grep Architecture
pulled="${pulled} ${i}"
else
if docker inspect "${i}" >/dev/null 2>&1; then
pulled="${pulled} ${i}"
else
echo "Image pull failed: ${i}"
fi
fi
echo "-------------------------------------------------"
done <"${1}"
shift
done
}
downloadAllNeededImagesAndCompress() {
while (( $# > 0 )); do
pulled=""
while IFS= read -r i; do
[ -z "${i}" ] && continue
echo "开始下载:${i}"
if docker pull --platform linux/arm64 "${i}" >/dev/null 2>&1; then
echo "Image pull success: ${i}"
# 增加检查,镜像 的架构
docker image inspect ${i} | grep Architecture
pulled="${pulled} ${i}"
else
if docker inspect "${i}" >/dev/null 2>&1; then
pulled="${pulled} ${i}"
else
echo "Image pull failed: ${i}"
fi
fi
echo "-------------------------------------------------"
done <"${1}"
compressPacName="$(echo ${1} | cut -d"." -f1).tar.gz"
echo "Creating ${compressPacName} with $(echo ${pulled} | wc -w | tr -d '[:space:]') images"
docker save $(echo ${pulled}) | gzip --stdout > ${compressPacName}
shift
done
echo "已经完成打包工作!"
}
pushRKEImageToHarbor(){
linux_images=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
linux_images+=("${i}");
done < "${rancher_image_list}"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
for i in "${linux_images[@]}"; do
[ -z "${i}" ] && continue
case $i in
*/*)
image_name="${DockerRegisterDomain}/${i}"
;;
*)
image_name="${DockerRegisterDomain}/rancher/${i}"
;;
esac
echo "开始镜像至私有仓库推送:${image_name}"
docker tag "${i}" "${image_name}"
docker push "${image_name}"
echo "-------------------------------------------------"
done
}
pushCMLCAPPImageToHarbor(){
app_images=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
app_images+=("${i}");
done < "${cmlc_app_image_list}"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
for app in "${app_images[@]}"; do
[ -z "${app}" ] && continue
image_name="${DockerRegisterDomain}/$(echo ${app} | cut -d"/" -f2-8)"
echo "开始镜像至私有仓库推送:${image_name}"
docker tag "${app}" "${image_name}"
docker push "${image_name}"
echo "-------------------------------------------------"
done
}
pushMiddlewareImageToHarbor(){
middleware_image=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
middleware_image+=("${i}");
done < "${middleware_image_list}"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
for app in "${middleware_image[@]}"; do
[ -z "${app}" ] && continue
case ${app} in
*/*/*)
image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f3-8)"
;;
*/*)
image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f2-8)"
;;
esac
echo "开始镜像至私有仓库推送:${image_name}"
docker tag "${app}" "${image_name}"
docker push "${image_name}"
echo "-------------------------------------------------"
done
}
# downloadAllNeededImagesAndCompress "${middleware_image_list}"
#downloadAllNeededImages "${rancher_image_list}"
#pushRKEImageToHarbor
#pushCMLCAPPImageToHarbor
pushMiddlewareImageToHarbor
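
On the offline side, the bundle produced by downloadAllNeededImagesAndCompress loads back with a single command; the file name follows the compressPacName convention above (list name with a .tar.gz suffix):

docker load < middleware-images.tar.gz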

View File

@@ -23,6 +23,10 @@ downloadAllNeededImages() {
echo "开始下载:${i}" echo "开始下载:${i}"
if docker pull "${i}" >/dev/null 2>&1; then if docker pull "${i}" >/dev/null 2>&1; then
echo "Image pull success: ${i}" echo "Image pull success: ${i}"
# 增加检查,镜像 的架构
docker image inspect ${i} | grep Architecture
pulled="${pulled} ${i}" pulled="${pulled} ${i}"
else else
if docker inspect "${i}" >/dev/null 2>&1; then if docker inspect "${i}" >/dev/null 2>&1; then
@@ -45,6 +49,9 @@ downloadAllNeededImagesAndCompress() {
echo "开始下载:${i}" echo "开始下载:${i}"
if docker pull "${i}" >/dev/null 2>&1; then if docker pull "${i}" >/dev/null 2>&1; then
echo "Image pull success: ${i}" echo "Image pull success: ${i}"
# 增加检查,镜像 的架构
docker image inspect ${i} | grep Architecture
pulled="${pulled} ${i}" pulled="${pulled} ${i}"
else else
if docker inspect "${i}" >/dev/null 2>&1; then if docker inspect "${i}" >/dev/null 2>&1; then
@@ -139,7 +146,7 @@ pushMiddlewareImageToHarbor(){
} }
downloadAllNeededImagesAndCompress "${cmlc_app_image_list}" downloadAllNeededImagesAndCompress "${middleware_image_list}"
#downloadAllNeededImages "${rancher_image_list}" #downloadAllNeededImages "${rancher_image_list}"
#pushRKEImageToHarbor #pushRKEImageToHarbor

View File

@@ -1,13 +1,13 @@
# https://dl.min.io/client/mc/release/windows-amd64/mc.exe # https://dl.min.io/client/mc/release/windows-amd64/mc.exe
& "C:\Users\makn\Downloads\mc.exe" alias set uav-demo https://oss.demo.uavcmlc.com cmii B#923fC7mk

& "C:\Users\makn\Downloads\mc.exe" ls uav-demo/cmlc-installation/6.2.0-szgz-arm/ | ForEach-Object {
    $item=($_.Split()[-1])
    Write-Host "start to download $item "
    & "C:\Users\makn\Downloads\mc.exe" get uav-demo/cmlc-installation/6.2.0-szgz-arm/$item "D:\Desktop\cmii\cmii\"
    Write-Host ""
}

View File

@@ -0,0 +1,63 @@
busybox
rancher/backup-restore-operator:v1.0.3
rancher/calico-cni:v3.17.2
rancher/calico-ctl:v3.17.2
rancher/calico-kube-controllers:v3.17.2
rancher/calico-pod2daemon-flexvol:v3.17.2
rancher/cis-operator:v1.0.3
rancher/cluster-proportional-autoscaler:1.7.1
rancher/coredns-coredns:1.8.0
rancher/coreos-etcd:v3.4.14-rancher1
rancher/coreos-kube-state-metrics:v1.9.7
rancher/coreos-prometheus-config-reloader:v0.39.0
rancher/coreos-prometheus-operator:v0.39.0
rancher/externalip-webhook:v0.1.6
rancher/flannel-cni:v0.3.0-rancher6
rancher/coreos-flannel:v0.13.0-rancher1
rancher/fleet-agent:v0.3.4
rancher/fleet:v0.3.4
rancher/fluentd:v0.1.24
rancher/grafana-grafana:7.1.5
rancher/hyperkube:v1.20.4-rancher1
rancher/jimmidyson-configmap-reload:v0.3.0
rancher/k8s-dns-dnsmasq-nanny:1.15.2
rancher/k8s-dns-kube-dns:1.15.2
rancher/k8s-dns-node-cache:1.15.13
rancher/k8s-dns-sidecar:1.15.2
rancher/klipper-lb:v0.1.2
rancher/kube-api-auth:v0.1.4
rancher/kubectl:v1.20.4
rancher/kubernetes-external-dns:v0.7.3
rancher/cluster-proportional-autoscaler:1.8.1
rancher/library-busybox:1.31.1
rancher/library-busybox:1.32.1
rancher/library-nginx:1.19.2-alpine
rancher/library-traefik:1.7.19
rancher/local-path-provisioner:v0.0.11
rancher/local-path-provisioner:v0.0.14
rancher/local-path-provisioner:v0.0.19
rancher/log-aggregator:v0.1.7
rancher/istio-kubectl:1.5.10
rancher/metrics-server:v0.4.1
rancher/configmap-reload:v0.3.0-rancher4
rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
rancher/nginx-ingress-controller:nginx-0.43.0-rancher1
rancher/opa-gatekeeper:v3.1.0-beta.7
rancher/openzipkin-zipkin:2.14.2
rancher/pause:3.2
rancher/plugins-docker:18.09
rancher/prom-alertmanager:v0.21.0
rancher/prom-node-exporter:v1.0.1
rancher/prom-prometheus:v2.18.2
rancher/prometheus-auth:v0.2.1
rancher/rancher-agent:v2.5.7
rancher/rancher-webhook:v0.1.0-beta9
rancher/rancher:v2.5.7
rancher/rke-tools:v0.1.72
rancher/security-scan:v0.1.14
rancher/security-scan:v0.2.2
rancher/shell:v0.1.6
rancher/sonobuoy-sonobuoy:v0.16.3
rancher/system-upgrade-controller:v0.6.2

View File

@@ -0,0 +1,19 @@
bitnami/redis:6.2.14-debian-11-r1
bitnami/mysql:8.1.0-debian-11-r42
simonrupf/chronyd:0.4.3
bitnami/bitnami-shell:11-debian-11-r136
bitnami/rabbitmq:3.11.26-debian-11-r2
ossrs/srs:v5.0.195
emqx/emqx:4.4.19
emqx/emqx:5.5.1
nacos/nacos-server:v2.1.2-slim
mongo:5.0
bitnami/minio:2023.5.4
kubernetesui/dashboard:v2.0.1
kubernetesui/metrics-scraper:v1.0.4
nginx:1.24.0
redis:6.0.20-alpine
dyrnq/nfs-subdir-external-provisioner:v4.0.2
jerrychina2020/rke-tools:v0.175-linux
jerrychina2020/rke-tools:v0.175
busybox:latest

View File

@@ -1,7 +1,7 @@
#!/bin/bash
harbor_host=172.31.2.7:8033/admin
namespace=szgz

app_name=""
new_tag=""

View File

@@ -1,10 +1,10 @@
 #!/bin/bash
-# Upload the SQL files to the MySQL data directory
+# Upload the SQL files to the MySQL data directory /var/lib/docker/mysql-pv
-# Set the directory ownership with chown 1001:1001 ./sql_file/
+# Set the directory ownership with chown 1001:1001 ./6.2.0/*
 # Then exec into the MySQL pod
-export sql_import_file_path="/bitnami/mysql/master_data_6.2.0"
+export sql_import_file_path="/bitnami/mysql/6.2.0"
 for sql_file in $(ls "$sql_import_file_path" | sort -n -k1.1,1.2); do
 echo "current file is ${sql_file}"
@@ -23,4 +23,8 @@ done
 # /root/wdd/mysql/bin/mysql -uroot -pGwubc6CxRM -h192.168.35.178 -P33308 <"$sql_import_file_path/${sql_file}"
 # chongqing san hua
 # /root/wdd/mysql/bin/mysql -uroot -pQzfXQhd3bQ -h36.133.115.164 -P53309 <"$sql_import_file_path/${sql_file}"
+# Jiangxi Mobile upgrade
+/root/wdd/mysql-8.0.27-linux-glibc2.17-x86_64-minimal/bin/mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 < new_schema.sql
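
For reference, the intended shape of the import loop with a failure guard — a sketch only, where the mysql client path and MYSQL_ROOT_PASSWORD are placeholders for the per-site values shown in the commented examples above:

for sql_file in $(ls "$sql_import_file_path" | sort -n -k1.1,1.2); do
  echo "current file is ${sql_file}"
  if ! mysql -uroot -p"${MYSQL_ROOT_PASSWORD}" < "${sql_import_file_path}/${sql_file}"; then
    echo "import failed at ${sql_file}, stopping"
    break
  fi
done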

View File

@@ -40,3 +40,6 @@ yum install ncurses-compat-libs.x86_64
wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/mysql-8.0.27-linux-glibc2.17-x86_64-minimal.zip wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/mysql-8.0.27-linux-glibc2.17-x86_64-minimal.zip
wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/mysql-8.0.27-linux-glibc2.17-x86_64-minimal.tar.gz
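
After downloading, the minimal client only needs to be unpacked — a sketch assuming /root/wdd as the install prefix:

tar -xzf mysql-8.0.27-linux-glibc2.17-x86_64-minimal.tar.gz -C /root/wdd/
/root/wdd/mysql-8.0.27-linux-glibc2.17-x86_64-minimal/bin/mysql --version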

View File

@@ -2,10 +2,10 @@
 # Database table backup
-export mysql_exec_file_prefix=/data/lingyun/mysql-8.0.38-linux-glibc2.12-x86_64/bin
+export mysql_exec_file_prefix=/root/wdd/mysql-8.0.27-linux-glibc2.17-x86_64-minimal/bin
-export mysql_port=33663
+export mysql_port=33306
-export SQL_DUMP_FILE=/root/wdd/all_tables_5.7.0.sql
+export SQL_DUMP_FILE=/root/wdd/update_416_620/all_tables_4.1.6.sql
-export SQL_FULL_BACK_UP_FILE=/root/wdd/all_tables_5.7.0_20241210_fullback.sql
+export SQL_FULL_BACK_UP_FILE=/root/wdd/update_416_620/all_tables_4.1.6_20250217_fullback.sql
 export SQL_DDL_FILE="/mnt/d/NextCloudData/MasterLoad/Work/UAV Cloud/22.希腊项目-阿里云-德国/999-数据库脚本/5.7.0"
 backup_all_structure() {
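
A structure-only dump consistent with the variables above would look roughly like this — a sketch, not the script's actual body, with MYSQL_PWD as a placeholder for the real password:

"${mysql_exec_file_prefix}/mysqldump" -uroot -p"${MYSQL_PWD}" -h127.0.0.1 -P"${mysql_port}" \
  --no-data --routines --triggers --all-databases > "${SQL_DUMP_FILE}"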

View File

@@ -0,0 +1,165 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: cmii-uav-tsmpf-cm
  namespace: yzyd
data:
  server_config_docker.ini: |
    #minio=mc alias set minIO https://oss.demo.uavcmlc.com:18000/ cmii B#923fC7mk
    minio=mc alias set minIO http://192.168.0.17:39010/ cmii B#923fC7mk
    http_port=8192
    docker_logs_path=/workspace/logs
    docker_temp_folder=/workspace/temp_folder
    mp4_file_size=500
    upload_url=no used.
    query_file_url=no used.
    download_url=no_useful_now.
    send_complete_msg_url=no used.
    logs_path=no_useful_now.
    temp_folder=no_useful_now.
    #merge tasks to exclude (these cannot succeed because their ts files are missing)
    exclude_task_codes=
    #whether to run playback merge tasks; 0:false, 1:true
    merge_playback=1
    #how to download ts files: http, mc
    download_type=mc
    #only merge files of the given companies (useful to prioritize one company's files); comma-separated numeric IDs
    only_download_company_ids=
    #whether to write logs to disk; 0:false, 1:true
    log_to_disk=1
    #(integer) log level; 1:error, 2:warn, 3:info, 4:debug
    log_level=2
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: cmii-uav-tsmpf
  namespace: yzyd
  labels:
    app.kubernetes.io/app-version: 6.2.0
    app.kubernetes.io/managed-by: octopus-control
    cmii.app: cmii-uav-tsmpf
    cmii.type: backend
    octopus/control: backend-app-1.0.0
spec:
  replicas: 1
  selector:
    matchLabels:
      cmii.app: cmii-uav-tsmpf
      cmii.type: backend
  template:
    metadata:
      creationTimestamp: null
      labels:
        cmii.app: cmii-uav-tsmpf
        cmii.type: backend
    spec:
      volumes:
        - name: nfs-backend-log-volume
          persistentVolumeClaim:
            claimName: nfs-backend-log-pvc
        - name: server-config-docker
          configMap:
            name: cmii-uav-tsmpf-cm
            items:
              - key: server_config_docker.ini
                path: server_config_docker.ini
      containers:
        - name: cmii-uav-tsmpf
          image: '192.168.0.14:8033/cmii/tsmpf_docker_image:v2.0.1'
          ports:
            - name: pod-port
              containerPort: 8192
              protocol: TCP
          env:
            - name: K8S_NAMESPACE
              value: yzyd
            - name: APPLICATION_NAME
              value: cmii-uav-tsmpf
          resources:
            requests:
              cpu: 500m
              memory: 2Gi
          volumeMounts:
            - name: nfs-backend-log-volume
              mountPath: /cmii/logs
              subPath: yzyd/cmii-uav-tsmpf
            - name: server-config-docker
              mountPath: /workspace/server_config_docker.ini
              subPath: server_config_docker.ini
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: uavcloud.env
                    operator: In
                    values:
                      - demo
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 25%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
  name: cmii-uav-tsmpf
  namespace: yzyd
  labels:
    app.kubernetes.io/app-version: 6.2.0
    app.kubernetes.io/managed-by: octopus-control
    cmii.app: cmii-uav-tsmpf
    cmii.type: backend
    octopus/control: backend-app-1.0.0
spec:
  ports:
    - name: backend-tcp
      protocol: TCP
      port: 8192
      targetPort: 8192
      nodePort: 38192
  selector:
    cmii.app: cmii-uav-tsmpf
    cmii.type: backend
  type: NodePort
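
A quick way to roll out and smoke-test the manifests above — a sketch, assuming the three documents are saved as cmii-uav-tsmpf.yaml (hypothetical filename) and with <node-ip> standing in for any cluster node:

kubectl -n yzyd apply -f cmii-uav-tsmpf.yaml
kubectl -n yzyd get pods -l cmii.app=cmii-uav-tsmpf
curl http://<node-ip>:38192/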

View File

@@ -7,6 +7,10 @@ NFS server IP address N1C2IP
 MINIO node public IP M2C1IP
 MINIO node private IP M2D2IP
+Harbor 8033
+admin
+V2ryStr@ngPss
 DashBoard
 39999
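
The Harbor credentials above can be verified from any docker host — a sketch, with <harbor-ip> standing in for the actual address:

docker login -u admin -p 'V2ryStr@ngPss' <harbor-ip>:8033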