Initialize the project

zeaslity
2025-03-27 16:09:20 +08:00
parent e09a32d1e8
commit fc2d585489
709 changed files with 516391 additions and 0 deletions

.idea/vcs.xml

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>


@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -0,0 +1,21 @@
apiVersion: v2
appVersion: 2.1.7
description: Apache Doris is an easy-to-use, high-performance and real-time analytical
database based on MPP architecture, known for its extreme speed and ease of use.
home: https://doris.apache.org
icon: https://charts.selectdb.com/images/doris.jpg
keywords:
- doris
- database
- olap
- sharding
kubeVersion: '>= 1.19'
maintainers:
- email: hudechao@selectdb.com
name: Dechao Hu
name: doris
sources:
- https://github.com/apache/doris
- https://github.com/apache/doris-operator
type: application
version: 25.2.1


@@ -0,0 +1,61 @@
# Deploy Doris Cluster by Helm
[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/doris)](https://artifacthub.io/packages/search?repo=doris)
This chart deploys Doris on Kubernetes using Doris-Operator. Before using this chart, install doris-operator as described in the [install doc](https://artifacthub.io/packages/helm/doris/doris-operator).
## Install doris
### Add helm-chart repo and install doris-operator
This step duplicates the doris-operator installation document; you can skip it if those steps have already been completed.
1. Add the selectdb repository
```Bash
$ helm repo add selectdb https://charts.selectdb.com
```
2. Update the Helm Chart Repo to the latest version
```Bash
$ helm repo update selectdb
```
3. Check that the Helm Chart repo is up to date
```Bash
$ helm search repo selectdb
NAME CHART VERSION APP VERSION DESCRIPTION
selectdb/doris-operator 1.3.1 1.3.1 Doris-operator for doris creat ...
selectdb/doris 1.3.1 2.0.3 Apache Doris is an easy-to-use ...
```
4. Install doris-operator (with default config in a namespace named `doris`)
```Bash
$ helm install operator selectdb/doris-operator
```
### Install Doris using a DorisCluster
- Use the default config for deploying Doris
This deploys only the FE and BE components, using the default storageClass to provide persistent volumes.
```bash
$ helm install doriscluster selectdb/doris
```
- Custom Doris deployment
When you want to specify resources or a different deployment type, customize the [`values.yaml`](./values.yaml) and deploy with the following command.
```bash
$ helm install -f values.yaml doriscluster selectdb/doris
```
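For reference, a minimal override might look like the sketch below. The field names follow this chart's [`values.yaml`](./values.yaml); the replica counts and storage sizes are illustrative placeholders, not recommendations.
```yaml
# values.yaml (partial override; sizes are examples only)
feSpec:
  replicas: 3
  persistentVolumeClaim:
    metaPersistentVolume:
      storage: 200Gi
    logsPersistentVolume:
      storage: 100Gi
beSpec:
  replicas: 3
  resource:
    requests:
      cpu: 8
      memory: 16Gi
    limits:
      cpu: 16
      memory: 32Gi
  persistentVolumeClaim:
    dataPersistentVolume:
      storage: 500Gi
```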
### Validate installation status
After the install command is executed, scheduling and startup of the services take some time. Check the deployment status of the Pods with the `kubectl get pods` command.
When the `doriscluster` Pods are in the `Running` state and all containers in each Pod are ready, the deployment is successful.
```Bash
$ kubectl get pod --namespace doris
NAME READY STATUS RESTARTS AGE
doriscluster-helm-fe-0 1/1 Running 0 1m39s
doriscluster-helm-fe-1 1/1 Running 0 1m39s
doriscluster-helm-fe-2 1/1 Running 0 1m39s
doriscluster-helm-be-0 1/1 Running 0 16s
doriscluster-helm-be-1 1/1 Running 0 16s
doriscluster-helm-be-2 1/1 Running 0 16s
```
## Uninstall doriscluster
Confirm that Doris is no longer in use before uninstalling `doriscluster` with the following command.
```bash
$ helm uninstall doriscluster
```


@@ -0,0 +1 @@
Thank you for installing {{ .Chart.Name }}-{{ .Chart.Version }}


@@ -0,0 +1,267 @@
{{/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/}}
{{- define "doriscluster.name" -}}
{{ default .Chart.Name .Values.dorisCluster.name }}
{{- end }}
{{- define "doriscluster.namespace" -}}
{{ print .Release.Namespace }}
{{- end }}
{{- define "kube-control.name" -}}
{{- print "doris-operator" }}
{{- end }}
{{/*
doris cluster pod default resource.
*/}}
{{- define "doriscluster.default.resource" }}
requests:
cpu: 8
memory: 16Gi
limits:
cpu: 16
memory: 32Gi
{{- end }}
{{/*
doris cluster admin user and password secret name.
*/}}
{{- define "doriscluster.secret.name" -}}
{{ template "doriscluster.name" . }}-secret-base64
{{- end -}}
{{/*
doris cluster fe configMap default name.
*/}}
{{- define "doriscluster.default.feConfigMap.name" -}}
{{ template "doriscluster.name" . }}-fe-configmap
{{- end -}}
{{/*
doris cluster fe pod default configMap resolve file.
*/}}
{{- define "doriscluster.default.feConfig.resolveKey" }}
{{- print "fe.conf" }}
{{- end }}
{{/*
doris cluster be configMap default name.
*/}}
{{- define "doriscluster.default.beConfigMap.name" -}}
{{ template "doriscluster.name" . }}-be-configmap
{{- end -}}
{{/*
doris cluster pod default configMap resolve file.
*/}}
{{- define "doriscluster.default.beConfig.resolveKey" }}
{{- print "be.conf" }}
{{- end }}
{{/*
doris cluster cn configMap default name.
*/}}
{{- define "doriscluster.default.cnConfigMap.name" -}}
{{ template "doriscluster.name" . }}-cn-configmap
{{- end -}}
{{/*
doris cluster cn pod default configMap resolve file.
*/}}
{{- define "doriscluster.default.cnConfig.resolveKey" }}
{{- print "be.conf" }}
{{- end }}
{{/*
doris cluster broker configMap default name.
*/}}
{{- define "doriscluster.default.brokerConfigMap.name" -}}
{{ template "doriscluster.name" . }}-broker-configmap
{{- end -}}
{{/*
doris cluster broker pod default configMap resolve file.
*/}}
{{- define "doriscluster.default.brokerConfig.resolveKey" }}
{{- print "apache_hdfs_broker.conf" }}
{{- end }}
{{/*
doris cluster cn pod autoscaler default version.
*/}}
{{- define "doriscluster.default.autoScalerVersion" -}}
{{- print "v2" }}
{{- end -}}
{{/*
doris cluster fe PVC
*/}}
{{- define "doriscluster.fe.pvc" -}}
{{- if and .Values.feSpec.persistentVolumeClaim.metaPersistentVolume .Values.feSpec.persistentVolumeClaim.metaPersistentVolume.storage}}
- mountPath: /opt/apache-doris/fe/doris-meta
name: fe-meta
persistentVolumeClaimSpec:
{{- if or .Values.feSpec.persistentVolumeClaim.metaPersistentVolume.storageClassName .Values.feSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
storageClassName: {{ default .Values.feSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName .Values.feSpec.persistentVolumeClaim.metaPersistentVolume.storageClassName }}
{{- end }}
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.feSpec.persistentVolumeClaim.metaPersistentVolume.storage}}
{{- end }}
{{- if and .Values.feSpec.persistentVolumeClaim.logsPersistentVolume .Values.feSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
- mountPath: /opt/apache-doris/fe/log
name: fe-log
persistentVolumeClaimSpec:
{{- if or .Values.feSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName .Values.feSpec.persistentVolumeClaim.metaPersistentVolume.storageClassName}}
storageClassName: {{ default .Values.feSpec.persistentVolumeClaim.metaPersistentVolume.storageClassName .Values.feSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
{{- end }}
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.feSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
{{- end }}
{{- end -}}
{{/*
doris cluster be PVC
*/}}
{{- define "doriscluster.be.pvc" -}}
{{- if and .Values.beSpec.persistentVolumeClaim.dataPersistentVolume .Values.beSpec.persistentVolumeClaim.dataPersistentVolume.storage}}
- mountPath: /opt/apache-doris/be/storage
name: be-storage
persistentVolumeClaimSpec:
{{- if or .Values.beSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName .Values.beSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
storageClassName: {{ default .Values.beSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName .Values.beSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName }}
{{- end }}
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.beSpec.persistentVolumeClaim.dataPersistentVolume.storage}}
{{- end }}
{{- if and .Values.beSpec.persistentVolumeClaim.logsPersistentVolume .Values.beSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
- mountPath: /opt/apache-doris/be/log
name: be-log
persistentVolumeClaimSpec:
{{- if or .Values.beSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName .Values.beSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
storageClassName: {{ default .Values.beSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName .Values.beSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
{{- end }}
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.beSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
{{- end }}
{{- end -}}
{{/*
doris cluster cn PVC
*/}}
{{- define "doriscluster.cn.pvc" -}}
{{- if and .Values.cnSpec.persistentVolumeClaim.dataPersistentVolume .Values.cnSpec.persistentVolumeClaim.dataPersistentVolume.storage}}
- mountPath: /opt/apache-doris/be/storage
name: cn-storage
persistentVolumeClaimSpec:
{{- if or .Values.cnSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName .Values.cnSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
storageClassName: {{ default .Values.cnSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName .Values.cnSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName }}
{{- end }}
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.cnSpec.persistentVolumeClaim.dataPersistentVolume.storage}}
{{- end }}
{{- if and .Values.cnSpec.persistentVolumeClaim.logsPersistentVolume .Values.cnSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
- mountPath: /opt/apache-doris/be/log
name: cn-log
persistentVolumeClaimSpec:
{{- if or .Values.cnSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName .Values.cnSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
storageClassName: {{ default .Values.cnSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName .Values.cnSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
{{- end }}
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.cnSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
{{- end }}
{{- end -}}
{{/*
doris cluster broker PVC
*/}}
{{- define "doriscluster.broker.pvc" -}}
{{- if and .Values.brokerSpec.persistentVolumeClaim.logsPersistentVolume .Values.brokerSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
- mountPath: /opt/apache-doris/apache_hdfs_broker/log
name: broker-log
persistentVolumeClaimSpec:
{{- if .Values.brokerSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName}}
storageClassName: {{ .Values.brokerSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
{{- end }}
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.brokerSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
{{- end }}
{{- end -}}
{{- define "doriscluster.feConfig.configMaps" }}
{{- range .Values.feSpec.configMap.mountConfigMaps }}
- configMapName: {{ .configMapName }}
mountPath: {{ .mountPath }}
{{- end }}
{{- end }}
{{- define "doriscluster.beConfig.configMaps" }}
{{- range .Values.beSpec.configMap.mountConfigMaps }}
- configMapName: {{ .configMapName }}
mountPath: {{ .mountPath }}
{{- end }}
{{- end }}
{{- define "doriscluster.cnConfig.configMaps" }}
{{- range .Values.cnSpec.configMap.mountConfigMaps }}
- configMapName: {{ .configMapName }}
mountPath: {{ .mountPath }}
{{- end }}
{{- end }}
{{- define "doriscluster.brokerConfig.configMaps" }}
{{- range .Values.brokerSpec.configMap.mountConfigMaps }}
- configMapName: {{ .configMapName }}
mountPath: {{ .mountPath }}
{{- end }}
{{- end }}


@@ -0,0 +1,31 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
{{- $originalMap := omit .Values.beSpec.configMap "mountConfigMaps" }}
{{- if $originalMap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "doriscluster.default.beConfigMap.name" . }}
namespace: {{ template "doriscluster.namespace" . }}
labels:
app.kubernetes.io/name: doriscluster
app.kubernetes.io/instance: {{ template "doriscluster.name" . }}
app.kubernetes.io/part-of: {{ template "kube-control.name" . }}
data:
{{- toYaml $originalMap | nindent 2}}
{{- end }}


@@ -0,0 +1,31 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
{{- $originalMap := omit .Values.brokerSpec.configMap "mountConfigMaps" }}
{{- if $originalMap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "doriscluster.default.brokerConfigMap.name" . }}
namespace: {{ template "doriscluster.namespace" . }}
labels:
app.kubernetes.io/name: doriscluster
app.kubernetes.io/instance: {{ template "doriscluster.name" . }}
app.kubernetes.io/part-of: {{ template "kube-control.name" . }}
data:
{{- toYaml $originalMap | nindent 2}}
{{- end }}


@@ -0,0 +1,31 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
{{- $originalMap := omit .Values.cnSpec.configMap "mountConfigMaps" }}
{{- if $originalMap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "doriscluster.default.cnConfigMap.name" . }}
namespace: {{ template "doriscluster.namespace" . }}
labels:
app.kubernetes.io/name: doriscluster
app.kubernetes.io/instance: {{ template "doriscluster.name" . }}
app.kubernetes.io/part-of: {{ template "kube-control.name" . }}
data:
{{- toYaml $originalMap | nindent 2}}
{{- end }}


@@ -0,0 +1,31 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
{{- $originalMap := omit .Values.feSpec.configMap "mountConfigMaps" }}
{{- if $originalMap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "doriscluster.default.feConfigMap.name" . }}
namespace: {{ template "doriscluster.namespace" . }}
labels:
app.kubernetes.io/name: doriscluster
app.kubernetes.io/instance: {{ template "doriscluster.name" . }}
app.kubernetes.io/part-of: {{ template "kube-control.name" . }}
data:
{{- toYaml $originalMap | nindent 2}}
{{- end }}


@@ -0,0 +1,56 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
{{$cmDatas := list }}
{{- range $cp := .Values.feSpec.configMap.mountConfigMaps }}
{{- if $cp.data }}
{{- $cmDatas = append $cmDatas $cp }}
{{- end }}
{{- end }}
{{- range $index, $cp := .Values.beSpec.configMap.mountConfigMaps }}
{{- if $cp.data }}
{{- $cmDatas = append $cmDatas $cp }}
{{- end }}
{{- end }}
{{- range $index, $cp := .Values.brokerSpec.configMap.mountConfigMaps }}
{{- if $cp.data }}
{{- $cmDatas = append $cmDatas $cp }}
{{- end }}
{{- end }}
{{- range $index, $cp := .Values.cnSpec.configMap.mountConfigMaps }}
{{- if $cp.data }}
{{- $cmDatas = append $cmDatas $cp }}
{{- end }}
{{- end }}
{{- range $cp := $cmDatas }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ $cp.configMapName }}
namespace: {{ template "doriscluster.namespace" $ }}
labels:
app.kubernetes.io/name: doriscluster
app.kubernetes.io/instance: {{ template "doriscluster.name" $ }}
app.kubernetes.io/part-of: {{ template "kube-control.name" $ }}
data:
{{- toYaml $cp.data | nindent 2 }}
---
{{- end }}


@@ -0,0 +1,368 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This yaml contains all fields of the dorisCluster CRD. It is only a reference for how to configure, not for direct deployment on kubernetes.
apiVersion: doris.selectdb.com/v1
kind: DorisCluster
metadata:
name: {{ template "doriscluster.name" . }}
namespace: {{ template "doriscluster.namespace" . }}
labels:
app.kubernetes.io/name: doriscluster
app.kubernetes.io/instance: {{ template "doriscluster.name" . }}
app.kubernetes.io/part-of: {{ template "kube-control.name" . }}
{{- if .Values.dorisCluster.annotations }}
annotations:
{{- toYaml .Values.dorisCluster.annotations | nindent 4 }}
{{- end }}
spec:
{{- if .Values.dorisCluster.adminUser }}
adminUser:
{{- toYaml .Values.dorisCluster.adminUser | nindent 4 }}
{{- else if .Values.dorisCluster.authSecret }}
authSecret: {{ template "doriscluster.secret.name" . }}
{{- end }}
feSpec:
replicas: {{ .Values.feSpec.replicas }}
{{- if .Values.feSpec.electionNumber }}
electionNumber: {{ .Values.feSpec.electionNumber }}
{{- end }}
{{- if .Values.feSpec.labels }}
podLabels:
{{- toYaml .Values.feSpec.labels | nindent 6 }}
{{- end }}
image: {{ .Values.feSpec.image.repository }}:{{ .Values.feSpec.image.tag }}
{{- if .Values.feSpec.image.imagePullSecrets }}
imagePullSecrets:
{{- toYaml .Values.feSpec.image.imagePullSecrets | nindent 4 }}
{{- end }}
{{- if .Values.feSpec.service }}
service:
type: {{ .Values.feSpec.service.type }}
{{- if and (eq "LoadBalancer" .Values.feSpec.service.type) .Values.feSpec.service.loadbalancerIP }}
loadBalancerIP: {{ .Values.feSpec.service.loadbalancerIP }}
{{- end }}
{{- if .Values.feSpec.service.servicePorts }}
servicePorts:
{{- toYaml .Values.feSpec.service.servicePorts | nindent 6 }}
{{- end }}
{{- if .Values.feSpec.service.annotations }}
annotations:
{{- toYaml .Values.feSpec.service.annotations | nindent 8 }}
{{- end }}
{{- end }}
{{- if .Values.feSpec.configMap }}
configMapInfo:
{{- $originalMap := .Values.feSpec.configMap }}
{{- $defaultMap := omit $originalMap "mountConfigMaps" }}
{{- if $defaultMap }}
configMapName: {{ template "doriscluster.default.feConfigMap.name" . }}
resolveKey: {{ template "doriscluster.default.feConfig.resolveKey" . }}
{{- end }}
{{- if .Values.feSpec.configMap.mountConfigMaps }}
configMaps:
{{- template "doriscluster.feConfig.configMaps" . }}
{{- end }}
{{- end }}
{{- if .Values.feSpec.resource }}
{{- toYaml .Values.feSpec.resource | nindent 4 }}
{{- else }}
{{- include "doriscluster.default.resource" .}}
{{- end }}
{{- if .Values.feSpec.nodeSelector }}
nodeSelector:
{{- toYaml .Values.feSpec.nodeSelector | nindent 6 }}
{{- end }}
{{- if .Values.feSpec.env }}
envVars:
{{- toYaml .Values.feSpec.env | nindent 4 }}
{{- end }}
{{- if .Values.feSpec.affinity }}
affinity:
{{- toYaml .Values.feSpec.affinity | nindent 6 }}
{{- end }}
{{- if .Values.feSpec.tolerations }}
tolerations:
{{- toYaml .Values.feSpec.tolerations | nindent 4 }}
{{- end }}
{{- if .Values.feSpec.hostAliases }}
hostAliases:
{{- toYaml .Values.feSpec.hostAliases | nindent 4 }}
{{- end }}
{{- if .Values.feSpec.persistentVolumeClaim }}
persistentVolumes:
{{- template "doriscluster.fe.pvc" . }}
{{- end }}
{{- if .Values.feSpec.systemInitialization }}
systemInitialization:
{{- if .Values.feSpec.systemInitialization.initImage }}
initImage: {{ .Values.feSpec.systemInitialization.initImage }}
{{- end }}
{{- if .Values.feSpec.systemInitialization.command }}
command: {{ .Values.feSpec.systemInitialization.command }}
{{- end }}
{{- end }}
beSpec:
replicas: {{ .Values.beSpec.replicas }}
{{- if .Values.beSpec.labels }}
podLabels:
{{- toYaml .Values.beSpec.labels | nindent 6 }}
{{- end }}
image: {{ .Values.beSpec.image.repository }}:{{ .Values.beSpec.image.tag }}
{{- if .Values.beSpec.image.imagePullSecrets }}
imagePullSecrets:
{{- toYaml .Values.beSpec.image.imagePullSecrets | nindent 4 }}
{{- end }}
{{- if .Values.beSpec.service }}
service:
type: {{ .Values.beSpec.service.type }}
{{- if and (eq "LoadBalancer" .Values.beSpec.service.type) .Values.beSpec.service.loadbalancerIP }}
loadBalancerIP: {{ .Values.beSpec.service.loadbalancerIP }}
{{- end }}
{{- if .Values.beSpec.service.servicePorts }}
servicePorts:
{{- toYaml .Values.beSpec.service.servicePorts | nindent 6 }}
{{- end }}
{{- if .Values.beSpec.service.annotations }}
annotations:
{{- toYaml .Values.beSpec.service.annotations | nindent 8 }}
{{- end }}
{{- end }}
{{- if .Values.beSpec.configMap }}
configMapInfo:
{{- $originalMap := .Values.beSpec.configMap }}
{{- $defaultMap := omit $originalMap "mountConfigMaps" }}
{{- if $defaultMap }}
configMapName: {{ template "doriscluster.default.beConfigMap.name" . }}
resolveKey: {{ template "doriscluster.default.beConfig.resolveKey" . }}
{{- end }}
{{- if .Values.beSpec.configMap.mountConfigMaps }}
configMaps:
{{- template "doriscluster.beConfig.configMaps" . }}
{{- end }}
{{- end }}
{{- if .Values.beSpec.resource }}
{{- toYaml .Values.beSpec.resource | nindent 4 }}
{{- else }}
{{- include "doriscluster.default.resource" .}}
{{- end }}
{{- if .Values.beSpec.nodeSelector }}
nodeSelector:
{{- toYaml .Values.beSpec.nodeSelector | nindent 6 }}
{{- end }}
{{- if .Values.beSpec.env }}
envVars:
{{- toYaml .Values.beSpec.env | nindent 4 }}
{{- end }}
{{- if .Values.beSpec.affinity }}
affinity:
{{- toYaml .Values.beSpec.affinity | nindent 6 }}
{{- end }}
{{- if .Values.beSpec.tolerations }}
tolerations:
{{- toYaml .Values.beSpec.tolerations | nindent 4 }}
{{- end }}
{{- if .Values.beSpec.hostAliases }}
hostAliases:
{{- toYaml .Values.beSpec.hostAliases | nindent 4 }}
{{- end }}
{{- if .Values.beSpec.persistentVolumeClaim }}
persistentVolumes:
{{- template "doriscluster.be.pvc" . }}
{{- end }}
{{- if .Values.beSpec.systemInitialization }}
systemInitialization:
{{- if .Values.beSpec.systemInitialization.initImage }}
initImage: {{ .Values.beSpec.systemInitialization.initImage }}
{{- end }}
{{- if .Values.beSpec.systemInitialization.command }}
command: {{ .Values.beSpec.systemInitialization.command }}
{{- end }}
{{- end }}
{{- if .Values.dorisCluster.enabledCn }}
cnSpec:
replicas: {{ .Values.cnSpec.replicas }}
{{- if .Values.cnSpec.labels }}
podLabels:
{{- toYaml .Values.cnSpec.labels | nindent 6 }}
{{- end }}
image: {{ .Values.cnSpec.image.repository }}:{{ .Values.cnSpec.image.tag }}
{{- if .Values.cnSpec.image.imagePullSecrets }}
imagePullSecrets:
{{- toYaml .Values.cnSpec.image.imagePullSecrets | nindent 4 }}
{{- end }}
{{- if .Values.cnSpec.service }}
service:
type: {{ .Values.cnSpec.service.type }}
{{- if and (eq "LoadBalancer" .Values.cnSpec.service.type) .Values.cnSpec.service.loadbalancerIP }}
loadBalancerIP: {{ .Values.cnSpec.service.loadbalancerIP }}
{{- end }}
{{- if .Values.cnSpec.service.servicePorts }}
servicePorts:
{{- toYaml .Values.cnSpec.service.servicePorts | nindent 6 }}
{{- end }}
{{- if .Values.cnSpec.service.annotations }}
annotations:
{{- toYaml .Values.cnSpec.service.annotations | nindent 8 }}
{{- end }}
{{- end }}
{{- if .Values.cnSpec.configMap }}
configMapInfo:
{{- $originalMap := .Values.cnSpec.configMap }}
{{- $defaultMap := omit $originalMap "mountConfigMaps" }}
{{- if $defaultMap }}
configMapName: {{ template "doriscluster.default.cnConfigMap.name" . }}
resolveKey: {{ template "doriscluster.default.cnConfig.resolveKey" . }}
{{- end }}
{{- if .Values.cnSpec.configMap.mountConfigMaps }}
configMaps:
{{- template "doriscluster.cnConfig.configMaps" . }}
{{- end }}
{{- end }}
{{- if .Values.cnSpec.resource }}
{{- toYaml .Values.cnSpec.resource | nindent 4 }}
{{- else }}
{{- include "doriscluster.default.resource" .}}
{{- end }}
{{- if .Values.cnSpec.nodeSelector }}
nodeSelector:
{{- toYaml .Values.cnSpec.nodeSelector | nindent 6 }}
{{- end }}
{{- if .Values.cnSpec.env }}
envVars:
{{- toYaml .Values.cnSpec.env | nindent 4 }}
{{- end }}
{{- if .Values.cnSpec.affinity }}
affinity:
{{- toYaml .Values.cnSpec.affinity | nindent 6 }}
{{- end }}
{{- if .Values.cnSpec.tolerations }}
tolerations:
{{- toYaml .Values.cnSpec.tolerations | nindent 4 }}
{{- end }}
{{- if .Values.cnSpec.hostAliases }}
hostAliases:
{{- toYaml .Values.cnSpec.hostAliases | nindent 4 }}
{{- end }}
{{- if .Values.cnSpec.persistentVolumeClaim }}
persistentVolumes:
{{- template "doriscluster.cn.pvc" . }}
{{- end }}
{{- if .Values.cnSpec.systemInitialization }}
systemInitialization:
{{- if .Values.cnSpec.systemInitialization.initImage }}
initImage: {{ .Values.cnSpec.systemInitialization.initImage }}
{{- end }}
{{- if .Values.cnSpec.systemInitialization.command }}
command: {{ .Values.cnSpec.systemInitialization.command }}
{{- end }}
{{- end }}
{{- if .Values.cnSpec.autoScalingPolicy.enable }}
autoScalingPolicy:
version: {{default ( include "doriscluster.default.autoScalerVersion" . ) .Values.cnSpec.autoScalingPolicy.version }}
minReplicas: {{ .Values.cnSpec.autoScalingPolicy.minReplicas}}
maxReplicas: {{ .Values.cnSpec.autoScalingPolicy.maxReplicas}}
hpaPolicy:
{{- toYaml .Values.cnSpec.autoScalingPolicy.hpaPolicy | nindent 8 }}
{{- end }}
{{- end }}
{{- if .Values.dorisCluster.enabledBroker }}
brokerSpec:
replicas: {{ .Values.brokerSpec.replicas }}
{{- if .Values.brokerSpec.labels }}
podLabels:
{{- toYaml .Values.brokerSpec.labels | nindent 6 }}
{{- end }}
image: {{ .Values.brokerSpec.image.repository }}:{{ .Values.brokerSpec.image.tag }}
{{- if .Values.brokerSpec.image.imagePullSecrets }}
imagePullSecrets:
{{- toYaml .Values.brokerSpec.image.imagePullSecrets | nindent 4 }}
{{- end }}
{{- if .Values.brokerSpec.configMap }}
configMapInfo:
{{- $originalMap := .Values.brokerSpec.configMap }}
{{- $defaultMap := omit $originalMap "mountConfigMaps" }}
{{- if $defaultMap }}
configMapName: {{ template "doriscluster.default.brokerConfigMap.name" . }}
resolveKey: {{ template "doriscluster.default.brokerConfig.resolveKey" . }}
{{- end }}
{{- if .Values.brokerSpec.configMap.mountConfigMaps }}
configMaps:
{{- template "doriscluster.brokerConfig.configMaps" . }}
{{- end }}
{{- end }}
{{- if .Values.brokerSpec.resource }}
{{- toYaml .Values.brokerSpec.resource | nindent 4 }}
{{- else }}
{{- include "doriscluster.default.resource" .}}
{{- end }}
{{- if .Values.brokerSpec.nodeSelector }}
nodeSelector:
{{- toYaml .Values.brokerSpec.nodeSelector | nindent 6 }}
{{- end }}
{{- if .Values.brokerSpec.env }}
envVars:
{{- toYaml .Values.brokerSpec.env | nindent 4 }}
{{- end }}
{{- if .Values.brokerSpec.affinity }}
affinity:
{{- toYaml .Values.brokerSpec.affinity | nindent 6 }}
{{- end }}
{{- if .Values.brokerSpec.tolerations }}
tolerations:
{{- toYaml .Values.brokerSpec.tolerations | nindent 4 }}
{{- end }}
{{- if .Values.brokerSpec.hostAliases }}
hostAliases:
{{- toYaml .Values.brokerSpec.hostAliases | nindent 4 }}
{{- end }}
{{- if .Values.brokerSpec.persistentVolumeClaim }}
persistentVolumes:
{{- template "doriscluster.broker.pvc" . }}
{{- end }}
{{- if .Values.brokerSpec.systemInitialization }}
systemInitialization:
{{- if .Values.brokerSpec.systemInitialization.initImage }}
initImage: {{ .Values.brokerSpec.systemInitialization.initImage }}
{{- end }}
{{- if .Values.brokerSpec.systemInitialization.command }}
command: {{ .Values.brokerSpec.systemInitialization.command }}
{{- end }}
{{- end }}
{{- end }}


@@ -0,0 +1,31 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
{{- if .Values.dorisCluster.authSecret }}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "doriscluster.secret.name" . }}
namespace: {{ template "doriscluster.namespace" . }}
labels:
app.kubernetes.io/name: doriscluster
app.kubernetes.io/instance: {{ template "doriscluster.name" . }}
app.kubernetes.io/part-of: {{ template "kube-control.name" . }}
type: kubernetes.io/basic-auth
data:
{{- toYaml .Values.dorisCluster.authSecret | nindent 2}}
{{- end }}


@@ -0,0 +1,697 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# deploy a doris cluster
dorisCluster:
# the name of doriscluster, if not set, the chart name will be used.
name: "doriscluster-helm"
# annotations for doris cluster.
annotations: {}
# specify the cn deploy or not.
enabledCn: false
# specify the broker deploy or not.
enabledBroker: false
# specify the doriscluster adminUser or not.
# setting adminUser records the password in clear text. This method is deprecated and not recommended.
# when adding adminUser, follow these steps:
# 1. Start doriscluster without adminUser
# 2. Run 'CREATE USER ...' or 'SET PASSWORD ...' in a mysql client
# 3. Add the name and password created in the previous step to adminUser and run helm
adminUser: {}
# name: root
# password: "12345"
# authSecret stores the username and password base64-encoded; this method is recommended
# 1. run: echo -n '{your_password}' | base64 to get the base64 string of the password
# 2. run: echo -n '{your_user}' | base64 to get the base64 string of the user
# 3. fill the encoded strings into the corresponding fields
# in the following example, the username is 'root' and the password is 't0p-Secret'
authSecret: {}
# username: cm9vdA==
# password: dDBwLVNlY3JldA==
feSpec:
replicas: 3
# electionNumber is the number of `FOLLOWER` nodes; the remaining (replicas - electionNumber) run as `OBSERVER`
# electionNumber: 3
# the pod labels for user select or classify pods.
labels: {}
# selectdb.app.classify: "classify-1"
image:
repository: apache/doris
tag: fe-2.1.7
# imagePullSecrets:
# - name: imagePullSecret
# config the service port for fe service.
# if you want to use a dedicated port for fe service, you can config the port.
# see https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports for more details.
service:
# the fe service type, only supported ClusterIP, NodePort, LoadBalancer
type: ClusterIP
# type: NodePort
# servicePorts:
# - nodePort: 30001
# targetPort: 9030
# - nodePort: 30002
# targetPort: 8030
# loadBalancerIP sets a static IP when type=LoadBalancer and loadbalancerIP is not empty.
# type: "LoadBalancer"
# loadbalancerIP: "127.0.0.1"
# when using LoadBalancer on a cloud platform, configure it via annotations:
annotations: {}
# service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "intranet"
# specify the configMap, as a dictionary.
# Its 'key' is the config file name in the doris 'doris_root_path/fe/conf/' file path,
# Its 'value' is the text content of the corresponding file (it should be relatively long, actually).
configMap: {}
# 'fe.conf' is the startup config file of fe, and the name must be 'fe.conf'.
# fe.conf: |
# http_port = 8030
# rpc_port = 9020
# query_port = 9030
# edit_log_port = 9010
# Other config files can also be specified here
# hdfs-site.xml: |
# <?xml version="1.0" encoding="UTF-8"?>
#
# <configuration>
# <property>
# <name>dfs.nameservices</name>
# <value>ns</value>
# </property>
#
# mountConfigMaps supports configmap mounting in the form of specifying the mounting directory
# but please note that the mounting path cannot be repeated.
# if the elements within mountConfigMaps contain data, it means that the configmap does not need to be created manually by the user, and vice versa.
# in the following example, the user needs to manually create configmap 'cm-1'
# mountConfigMaps: {}
# - configMapName: cm-1
# mountPath: /etc/cm1
# - configMapName: cm-2
# mountPath: /etc/cm2
# data:
# copy_file1: |
# text *** content
# copy_file2: |
# text *** content
# If configured separately here, it overrides the chart-wide default resource configuration;
# the defaults for the other component types still take effect.
resource: {}
# requests:
# cpu: 8
# memory: 16Gi
# limits:
# cpu: 16
# memory: 32Gi
# If specified, the pod's nodeSelector ("Map of nodeSelectors to match when scheduling pods on nodes")
nodeSelector: {}
# kubernetes.io/arch: amd64
# kubernetes.io/os: linux
# app.kubernetes.node.name: "k8s-node1"
# env represents an environment variable present in a Container.
# 'name' of the environment variable. Must be a C_IDENTIFIER.
# no more than one of the following may be specified: 'value' or 'valueFrom',
# 'value' is the value of environment.
# 'valueFrom' is source for the environment variable's value. Cannot be used if 'value' is not empty.
env: []
# - name: "HOME"
# value: "/opt/selectdb"
# - name: HOST_IP
# valueFrom:
# fieldRef:
# apiVersion: v1
# fieldPath: status.hostIP
# affinity is the constraints of pod scheduling
# nodeAffinity: Describes node affinity scheduling rules for the pod.
# podAffinity: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
# PodAntiAffinity: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
# reference: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
affinity: {}
# nodeAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#     - matchExpressions:
#       - key: "app.kubernetes.node.name"
#         operator: "In"
#         values:
#         - "k8s-node1"
#         - "k8s-node2"
# podAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#     - matchExpressions:
#       - key: "app.kubernetes.node.name"
#         operator: "In"
#         values:
#         - "fe1"
#         - "fe2"
# podAntiAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#     - matchExpressions:
#       - key: "app.kubernetes.node.name"
#         operator: "NotIn"
#         values:
#         - "fe1"
#         - "fe2"
# the pod this Toleration is attached to tolerates any taint that matches. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
# the triple <key,value,effect> using the matching operator <operator>.
# 'key' is the taint key that the toleration applies to. Empty means match all taint keys. if the 'key' is empty, 'operator' must be Exists; this combination means to match all values and all keys.
# 'operator' represents a key's relationship to the 'value'. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
# 'value' is the taint value the toleration matches to. If the 'operator' is Exists, the 'value' should be empty, otherwise just a regular string.
# 'effect' indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
tolerations: []
# - key: "toleration"
# operator: "Equal"
# value: "master"
# - key: "node-role.kubernetes.io/master"
# operator: "Exists"
# effect: "NoSchedule"
# - operator: "Exists"
# hostAliases allows adding entries to /etc/hosts inside the containers
hostAliases: []
# If doris uses hdfs or a catalog, configuring the namenode host mapping here is beneficial
# - ip: "127.0.0.1"
# hostnames:
# - "hostname1"
# - ip: "127.0.0.2"
# hostnames:
# - "hostname2"
persistentVolumeClaim: {}
# meta volume, mountPath is /opt/apache-doris/fe/doris-meta
# metaPersistentVolume:
# storage: 200Gi
# when using a specific storageclass, set storageClassName accordingly, as in the commented example.
# storageClassName: ""
# logs volume, mountPath is /opt/apache-doris/fe/log
# logsPersistentVolume:
# storage: 100Gi
# when using a specific storageclass, set storageClassName accordingly, as in the commented example.
# storageClassName: ""
systemInitialization: {}
# initImage: "selectdb/alpine:latest"
# command: [ "/sbin/sysctl", "-w", "vm.max_map_count=2000000" ]
beSpec:
replicas: 3
# the pod labels for user select or classify pods.
labels: {}
# selectdb.app.classify: "classify-1"
image:
repository: apache/doris
tag: be-2.1.7
# imagePullSecrets:
# - name: imagePullSecret
# config the service port for be service.
# if you want to use a dedicated port for be service, you can config the port.
# see https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports for more details.
service:
# the be service type, only supported ClusterIP, NodePort, LoadBalancer
type: ClusterIP
# type: NodePort
# servicePorts:
# - nodePort: 30003
# targetPort: 9040
# - nodePort: 30004
# targetPort: 8040
# loadBalancerIP sets a static IP when type=LoadBalancer and loadbalancerIP is not empty.
# type: "LoadBalancer"
# loadbalancerIP: "127.0.0.1"
# when using LoadBalancer on a cloud platform, configure it via annotations:
annotations: {}
# service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "intranet"
# specify the configMap, as a dictionary.
# Its 'key' is the config file name in the doris 'doris_root_path/be/conf/' file path,
# Its 'value' is the text content of the corresponding file (it should be relatively long, actually).
configMap: {}
# 'be.conf' is the startup config file of be, and the name must be 'be.conf'.
# be.conf: |
# be_port = 9060
# webserver_port = 8040
# heartbeat_service_port = 9050
# brpc_port = 8060
# storage_root_path = /data/doris/be/storage
# Other config files can also be specified here
# hdfs-site.xml: |
# <?xml version="1.0" encoding="UTF-8"?>
#
# <configuration>
# <property>
# <name>dfs.nameservices</name>
# <value>ns</value>
# </property>
#
# mountConfigMaps supports configmap mounting in the form of specifying the mounting directory
# but please note that the mounting path cannot be repeated.
# if the elements within mountConfigMaps contain data, it means that the configmap does not need to be created manually by the user, and vice versa.
# in the following example, the user needs to manually create configmap 'cm-1'
# mountConfigMaps: {}
# - configMapName: cm-1
# mountPath: /etc/cm1
# - configMapName: cm-2
# mountPath: /etc/cm2
# data:
# copy_file1: |
# text *** content
# copy_file2: |
# text *** content
# If configured separately here, it overrides the chart-wide default resource configuration;
# the defaults for the other component types still take effect.
resource: {}
# requests:
# cpu: 8
# memory: 16Gi
# limits:
# cpu: 16
# memory: 32Gi
# If specified, the pod's nodeSelector ("Map of nodeSelectors to match when scheduling pods on nodes")
nodeSelector: {}
# kubernetes.io/arch: amd64
# kubernetes.io/os: linux
# app.kubernetes.node.name: "k8s-node1"
# env represents an environment variable present in a Container.
# 'name' of the environment variable. Must be a C_IDENTIFIER.
# no more than one of the following may be specified: 'value' or 'valueFrom',
# 'value' is the value of environment.
# 'valueFrom' is source for the environment variable's value. Cannot be used if 'value' is not empty.
env: []
# - name: "HOME"
# value: "/opt/selectdb"
# - name: HOST_IP
# valueFrom:
# fieldRef:
# apiVersion: v1
# fieldPath: status.hostIP
# affinity is the constraints of pod scheduling
# nodeAffinity: Describes node affinity scheduling rules for the pod.
# podAffinity: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
# PodAntiAffinity: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
# reference: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
affinity: {}
# nodeAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#     - matchExpressions:
#       - key: "app.kubernetes.node.name"
#         operator: "In"
#         values:
#         - "k8s-node1"
#         - "k8s-node2"
# podAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#     - matchExpressions:
#       - key: "app.kubernetes.node.name"
#         operator: "In"
#         values:
#         - "fe1"
#         - "fe2"
# podAntiAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#     - matchExpressions:
#       - key: "app.kubernetes.node.name"
#         operator: "NotIn"
#         values:
#         - "fe1"
#         - "fe2"
# the pod this Toleration is attached to tolerates any taint that matches. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
# the triple <key,value,effect> using the matching operator <operator>.
# 'key' is the taint key that the toleration applies to. Empty means match all taint keys. if the 'key' is empty, 'operator' must be Exists; this combination means to match all values and all keys.
# 'operator' represents a key's relationship to the 'value'. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
# 'value' is the taint value the toleration matches to. If the 'operator' is Exists, the 'value' should be empty, otherwise just a regular string.
# 'effect' indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
tolerations: []
# - key: "toleration"
# operator: "Equal"
# value: "master"
# - key: "node-role.kubernetes.io/master"
# operator: "Exists"
# effect: "NoSchedule"
# - operator: "Exists"
# hostAliases allows adding entries to /etc/hosts inside the containers
hostAliases: []
# If doris uses hdfs or a catalog, configuring the namenode host mapping here is beneficial
# - ip: "127.0.0.1"
# hostnames:
# - "hostname1"
# - ip: "127.0.0.2"
# hostnames:
# - "hostname2"
persistentVolumeClaim: {}
# data volume, mountPath is /opt/apache-doris/be/storage
# dataPersistentVolume:
# storage: 200Gi
# when using a specific storageclass, set storageClassName accordingly, as in the commented example.
# storageClassName: ""
# logs volume, mountPath is /opt/apache-doris/be/log
# logsPersistentVolume:
# storage: 100Gi
# when using a specific storageclass, set storageClassName accordingly, as in the commented example.
# storageClassName: ""
systemInitialization: {}
# initImage: "selectdb/alpine:latest"
# command: [ "/sbin/sysctl", "-w", "vm.max_map_count=2000000" ]
cnSpec:
replicas: 3
# the pod labels for user select or classify pods.
labels: {}
# selectdb.app.classify: "classify-1"
image:
repository: apache/doris
tag: be-2.1.7
# imagePullSecrets:
# - name: imagePullSecret
# config the service port for cn service.
# if you want to use a dedicated port for cn service, you can config the port.
# see https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports for more details.
service:
# the cn service type, only supported ClusterIP, NodePort, LoadBalancer
type: ClusterIP
# type: NodePort
# servicePorts:
# - nodePort: 30005
# targetPort: 9040
# - nodePort: 30006
# targetPort: 8040
# type: "LoadBalancer"
# the loadBalancerIP for static ip config when the type=LoadBalancer and loadbalancerIp is not empty.
# loadbalancerIP: "127.0.0.1"
# use LoadBalancer on cloud platform, try to configure it using annotations
annotations: {}
# service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "intranet"
# specify the configMap, as a dictionary.
# Its 'key' is the config file name in the doris 'doris_root_path/be/conf/' file path,
# Its 'value' is the text content of the corresponding file (it should be relatively long, actually).
configMap: {}
# 'be.conf' is the startup config file of cn, and the name must be 'be.conf'.
# be.conf: |
# be_port = 9060
# webserver_port = 8040
# heartbeat_service_port = 9050
# brpc_port = 8060
# storage_root_path = /data/doris/be/storage
# be_node_role=computation
# Other config files can also be specified here
# hdfs-site.xml: |
# <?xml version="1.0" encoding="UTF-8"?>
#
# <configuration>
# <property>
# <name>dfs.nameservices</name>
# <value>ns</value>
# </property>
#
# mountConfigMaps supports configmap mounting in the form of specifying the mounting directory
# but please note that the mounting path cannot be repeated.
# if the elements within mountConfigMaps contain data, it means that the configmap does not need to be created manually by the user, and vice versa.
# in the following example, the user needs to manually create configmap 'cm-1'
# mountConfigMaps: {}
# - configMapName: cm-1
# mountPath: /etc/cm1
# - configMapName: cm-2
# mountPath: /etc/cm2
# data:
# copy_file1: |
# text *** content
# copy_file2: |
# text *** content
# If configured separately here, it overrides the chart-wide default resource configuration;
# the defaults for the other component types still take effect.
resource: {}
# requests:
# cpu: 8
# memory: 16Gi
# limits:
# cpu: 16
# memory: 32Gi
# If specified, the pod's nodeSelector ("Map of nodeSelectors to match when scheduling pods on nodes")
nodeSelector: {}
# kubernetes.io/arch: amd64
# kubernetes.io/os: linux
# app.kubernetes.node.name: "k8s-node1"
# env represents an environment variable present in a Container.
# 'name' of the environment variable. Must be a C_IDENTIFIER.
# no more than one of the following may be specified: 'value' or 'valueFrom',
# 'value' is the value of environment.
# 'valueFrom' is source for the environment variable's value. Cannot be used if 'value' is not empty.
env: []
# - name: "HOME"
# value: "/opt/selectdb"
# - name: HOST_IP
# valueFrom:
# fieldRef:
# apiVersion: v1
# fieldPath: status.hostIP
# affinity is the constraints of pod scheduling
# nodeAffinity: Describes node affinity scheduling rules for the pod.
# podAffinity: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
# PodAntiAffinity: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
# reference: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
affinity: {}
# nodeAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#     - matchExpressions:
#       - key: "app.kubernetes.node.name"
#         operator: "In"
#         values:
#         - "k8s-node1"
#         - "k8s-node2"
# podAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#     - matchExpressions:
#       - key: "app.kubernetes.node.name"
#         operator: "In"
#         values:
#         - "fe1"
#         - "fe2"
# podAntiAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#     - matchExpressions:
#       - key: "app.kubernetes.node.name"
#         operator: "NotIn"
#         values:
#         - "fe1"
#         - "fe2"
# the pod this Toleration is attached to tolerates any taint that matches. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
# the triple <key,value,effect> using the matching operator <operator>.
# 'key' is the taint key that the toleration applies to. Empty means match all taint keys. if the 'key' is empty, 'operator' must be Exists; this combination means to match all values and all keys.
# 'operator' represents a key's relationship to the 'value'. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
# 'value' is the taint value the toleration matches to. If the 'operator' is Exists, the 'value' should be empty, otherwise just a regular string.
# 'effect' indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
tolerations: []
# - key: "toleration"
# operator: "Equal"
# value: "master"
# - key: "node-role.kubernetes.io/master"
# operator: "Exists"
# effect: "NoSchedule"
# - operator: "Exists"
# hostAliases allows adding entries to /etc/hosts inside the containers
hostAliases: []
# If doris uses hdfs or a catalog, configuring the namenode host mapping here is beneficial
# - ip: "127.0.0.1"
# hostnames:
# - "hostname1"
# - ip: "127.0.0.2"
# hostnames:
# - "hostname2"
persistentVolumeClaim: {}
# data volume, mountPath is /opt/apache-doris/be/storage
# dataPersistentVolume:
# storage: 200Gi
# when using a specific storageclass, set storageClassName accordingly, as in the commented example.
# storageClassName: ""
# logs volume, mountPath is /opt/apache-doris/be/log
# logsPersistentVolume:
# storage: 100Gi
# when using a specific storageclass, set storageClassName accordingly, as in the commented example.
# storageClassName: ""
systemInitialization: {}
# initImage: "selectdb/alpine:latest"
# command: [ "/sbin/sysctl", "-w", "vm.max_map_count=2000000" ]
# specify the cn auto scaling policy
autoScalingPolicy:
enable: true
# version: v2
minReplicas: 1
maxReplicas: 4
# hpaPolicy corresponds to the HorizontalPodAutoscaler's HorizontalPodAutoscalerSpec
hpaPolicy: {}
# metrics:
# - type: Resource
# resource:
# name: cpu
# target:
# type: Utilization
# averageUtilization: 30
brokerSpec:
replicas: 3
# the pod labels for user select or classify pods.
labels: {}
# selectdb.app.classify: "classify-1"
image:
repository: apache/doris
tag: broker-2.1.7
# imagePullSecrets:
# - name: imagePullSecret
# specify the configMap, as a dictionary.
# Its 'key' is the config file name in the doris 'doris_root_path/apache_hdfs_broker/conf/' file path,
# Its 'value' is the text content of the corresponding file (it should be relatively long, actually).
configMap: {}
# 'apache_hdfs_broker.conf' is the startup config file of broker, and the name must be 'apache_hdfs_broker.conf'.
# apache_hdfs_broker.conf: |
# broker_ipc_port: 8000
# client_expire_seconds: 3600
# Other config files can also be specified here
# hdfs-site.xml: |
# <?xml version="1.0" encoding="UTF-8"?>
#
# <configuration>
# <property>
# <name>dfs.nameservices</name>
# <value>ns</value>
# </property>
#
# mountConfigMaps supports configmap mounting in the form of specifying the mounting directory
# but please note that the mounting path cannot be repeated.
# if the elements within mountConfigMaps contain data, it means that the configmap does not need to be created manually by the user, and vice versa.
# in the following example, the user needs to manually create configmap 'cm-1'
# mountConfigMaps: {}
# - configMapName: cm-1
# mountPath: /etc/cm1
# - configMapName: cm-2
# mountPath: /etc/cm2
# data:
# copy_file1: |
# text *** content
# copy_file2: |
# text *** content
# If configured separately here, it overrides the chart-wide default resource configuration;
# the defaults for the other component types still take effect.
resource: {}
# requests:
# cpu: 8
# memory: 16Gi
# limits:
# cpu: 16
# memory: 32Gi
# If specified, the pod's nodeSelector ("Map of nodeSelectors to match when scheduling pods on nodes")
nodeSelector: {}
# kubernetes.io/arch: amd64
# kubernetes.io/os: linux
# app.kubernetes.node.name: "k8s-node1"
# env represents an environment variable present in a Container.
# 'name' of the environment variable. Must be a C_IDENTIFIER.
# no more than one of the following may be specified: 'value' or 'valueFrom',
# 'value' is the value of environment.
# 'valueFrom' is source for the environment variable's value. Cannot be used if 'value' is not empty.
env: []
# - name: "HOME"
# value: "/opt/selectdb"
# - name: HOST_IP
# valueFrom:
# fieldRef:
# apiVersion: v1
# fieldPath: status.hostIP
# affinity is the constraints of pod scheduling
# nodeAffinity: Describes node affinity scheduling rules for the pod.
# podAffinity: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
# PodAntiAffinity: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
# reference: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
affinity: {}
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# key: "app.kubernetes.node.name"
# operator: "In"
# values:
# - "k8s-node1"
# "k8s-node2"
# podAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# key: "app.kubernetes.node.name"
# operator: "In"
# values:
# - "fe1"
# "fe2"
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# key: "app.kubernetes.node.name"
# operator: "NotIn"
# values:
# - "fe1"
# "fe2"
  # The pod this toleration is attached to tolerates any taint that matches. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  # A toleration matches a taint's triple <key,value,effect> using the matching operator <operator>.
  # 'key' is the taint key that the toleration applies to. Empty means match all taint keys; if 'key' is empty, 'operator' must be Exists, which matches all values and all keys.
  # 'operator' represents a key's relationship to the 'value'. Valid operators are Exists and Equal; the default is Equal. Exists is equivalent to a wildcard for value, so a pod can tolerate all taints of a particular category.
  # 'value' is the taint value the toleration matches. If 'operator' is Exists, 'value' should be empty; otherwise it is a regular string.
  # 'effect' indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
tolerations: []
# - key: "toleration"
# operator: "Equal"
# value: "master"
# - key: "node-role.kubernetes.io/master"
# operator: "Exists"
# effect: "NoSchedule"
# - operator: "Exists"
# hostAliases allows adding entries to /etc/hosts inside the containers
hostAliases: []
  # If Doris uses HDFS or an external catalog, mapping the namenode hosts here is helpful
# - ip: "127.0.0.1"
# hostnames:
# - "hostname1"
# - ip: "127.0.0.2"
# hostnames:
# - "hostname2"
persistentVolumeClaim: {}
# logs volume, mountPath is /opt/apache-doris/apache_hdfs_broker/log
# logsPersistentVolume:
# storage: 100Gi
  # when using a specific storageclass, set storageClassName accordingly, as in the commented example.
# storageClassName: ""
systemInitialization: {}
# initImage: "selectdb/alpine:latest"
# command: [ "/sbin/sysctl", "-w", "vm.max_map_count=2000000" ]

View File

@@ -0,0 +1,387 @@
import random
import threading
from queue import Queue
from paho.mqtt import client as mqtt_client
import numpy as np
import time
import logging
import os
import datetime
from KF_V2 import *
from utils import *
from config import *
import argparse
import json
import yaml
# Load the YAML configuration first
def load_mqtt_config():
config_path = os.getenv('CONFIG_PATH', 'config.yaml')
with open(config_path, 'r') as f:
config = yaml.safe_load(f)
return config['mqtt'], config['topics']
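# For reference, this loader expects a config.yaml shaped like the one shipped
# alongside this service (values mirror that file; treat them as an example):
#   mqtt:
#     broker: "192.168.35.178"
#     port: 31884
#     username: "cmlc"
#     password: "..."
#   topics:
#     mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
#     sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"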
# Fetch the MQTT and topics configuration
mqtt_config, topics_config = load_mqtt_config()
## =======================
# MQTT broker address (legacy hard-coded values, superseded by config.yaml)
# broker = '192.168.36.234'
# port = 37826
# username = "cmlc"
# password = "odD8#Ve7.B"
client_id = f'python-mqtt-{random.randint(0, 100)}'
# Build the ArgumentParser
parser = argparse.ArgumentParser(description='Parse command-line arguments')
# -t/--task_id: str, defaults to "+" (match any task)
parser.add_argument('-t', '--task_id', type=str, default="+", help='task ID')
# -g/--gate: int, defaults to 30
parser.add_argument('-g', '--gate', type=int, default=30, help='gating threshold')
# -i/--interval: float, defaults to 1.0
parser.add_argument('-i', '--interval', type=float, default=1.0, help='time interval')
# Parse the command-line arguments
args = parser.parse_args()
# Instantiate the DataFusion class
fusion_instance = DataFusion(
gate=args.gate,
interval=args.interval,
)
task_id = "10087"
# Extract the base path from the YAML mqtt_topic
base_path = topics_config['mqtt_topic'].split('/')[0]  # yields "bridge"
# Topic format used for data reporting
providerCode = "DP74b4ef9fb4aaf269"
fusionCode = "DPZYLY"
deviceType = "5ga"
fusionType = "fusion"
deviceId = "10580005"
fusionId = "554343465692430336"
sensor_id_list = ["80103"]
# Build the topic from base_path
topic = f"{base_path}/{providerCode}/device_data/{deviceType}/{deviceId}"
# Extract the base path from the YAML sensor_topic
base_topic = topics_config['sensor_topic'].split('FU_PAM')[0]  # yields "fromcheck/DP74b4ef9fb4aaf269/device_data/"
# Subscription topic, built from the YAML format
subscribe_topic = f"{base_topic}5ga/10000000000000"  # replace FU_PAM with 5ga, and + with a concrete ID
# Topic for publishing fusion results
# fusionId comes from the ID assigned when the task was dispatched
publish_topic = f"fromcheck/{fusionCode}/device_data/{fusionType}/{task_id}"
# Topic carrying run-parameter updates
fusion_parameters_topic = topics_config['sensor_topic']
# Data pool for incoming messages
data_pool = Queue()
run_parameter = None
interval = args.interval
# Define the reference point P0 (latitude, longitude)
reference_point = (104.08, 30.51)  # reference point coordinates
# Initialize the data-processing pipeline
pipe = Pipeline(fusion_parameters_topic=topics_config['sensor_topic'], reference_point=reference_point)
fusion_code = "FU_PAM/"+args.task_id
# Configure logging
def setup_logging():
    # Create the logs directory if it does not exist
    if not os.path.exists('logs'):
        os.makedirs('logs')
    # Log file name (includes the date)
    current_time = datetime.datetime.now()
    error_log_filename = f'logs/mqtt_connection_{current_time.strftime("%Y%m%d")}_error.log'
    # Configure the root logger
    logging.basicConfig(
        level=logging.INFO,  # record INFO and above
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=[
            logging.StreamHandler()  # also print to the console
        ]
    )
    # Configure the error logger
    error_logger = logging.getLogger('error_logger')
    error_logger.setLevel(logging.ERROR)
    # Create the file handler
    error_handler = logging.FileHandler(error_log_filename)
    error_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    # Attach the handler to the error logger
    error_logger.addHandler(error_handler)
def connect_mqtt() -> mqtt_client:
def on_connect(client, userdata, flags, rc):
if rc == 0:
logging.info("Successfully connected to MQTT Broker")
logging.info(f"Client ID: {client_id}")
logging.info(f"Broker: {mqtt_config['broker']}:{mqtt_config['port']}")
            # Re-subscribe to topics
client.subscribe(fusion_parameters_topic)
logging.info(f"Subscribed to fusion parameters topic: {fusion_parameters_topic}")
if hasattr(pipe, 'topics'):
for topic in pipe.topics:
client.subscribe(topic)
logging.info(f"Subscribed to topic: {topic}")
else:
logging.error(f"Failed to connect, return code: {rc} ({DISCONNECT_REASONS.get(rc, '未知错误')})")
def on_disconnect(client, userdata, rc):
current_time = datetime.datetime.now()
        reason = DISCONNECT_REASONS.get(rc, "Unknown error")
logging.warning(f"Disconnected from MQTT Broker at {current_time.strftime('%Y-%m-%d %H:%M:%S')}")
logging.warning(f"Disconnect reason code: {rc} - {reason}")
if rc != 0:
logging.error("Unexpected disconnection. Attempting to reconnect...")
try:
client.reconnect()
logging.info("Reconnection successful")
except Exception as e:
current_time = datetime.datetime.now()
logging.error(f"Reconnection failed at {current_time.strftime('%Y-%m-%d %H:%M:%S')}: {str(e)}")
logging.error(f"Exception type: {type(e).__name__}")
logging.error(f"Stack trace:", exc_info=True)
client = mqtt_client.Client(client_id, clean_session=True)
client.username_pw_set(mqtt_config['username'], mqtt_config['password'])
    # Keepalive and reconnect settings; paho takes the keepalive from connect() below
    client.reconnect_delay_set(min_delay=1, max_delay=60)  # reconnect delay between 1 and 60 seconds
    # Configure the last-will message
will_topic = f"fromcheck/{fusionCode}/status/{task_id}"
will_payload = "offline"
client.will_set(will_topic, will_payload, qos=1, retain=True)
    # Register the callbacks
client.on_connect = on_connect
client.on_disconnect = on_disconnect
try:
        client.connect(mqtt_config['broker'], mqtt_config['port'], keepalive=60)
except Exception as e:
logging.error(f"Initial connection failed: {str(e)}")
logging.error(f"Exception type: {type(e).__name__}")
logging.error(f"Stack trace:", exc_info=True)
time.sleep(5)
return connect_mqtt()
    # Publish the online status
client.publish(will_topic, "online", qos=1, retain=True)
return client
def subscribe(client: mqtt_client):
def on_message(client, userdata, msg):
try:
global run_parameter
global task_id
logging.info(f"Received message on topic: {msg.topic}")
logging.info(f"Message payload: {msg.payload.decode()}")
if "FU_PAM" in msg.topic:
if args.task_id == '+' or fusion_code in msg.topic:
new_run_parameter = msg.payload.decode()
if run_parameter != new_run_parameter:
logging.info(f"Run parameter updated from {run_parameter} to {new_run_parameter}")
run_parameter = new_run_parameter
new_topics = pipe.extract_parms(run_parameter)
logging.info(f"Extracted topics: {new_topics}")
                        client.subscribe(new_topics)  # re-subscribe with the updated topics
logging.info(f"Subscribed to new topics: {new_topics}")
logging.info('===========new run_parameter!===============')
current_time = datetime.datetime.now()
task_id = pipe.task_id
else:
data_pool.put((msg.topic, msg.payload))
except Exception as e:
logging.error(f"Error processing message: {str(e)}")
logging.error(f"Exception type: {type(e).__name__}")
logging.error(f"Stack trace:", exc_info=True)
    subscribe_topics = [(subscribe_topic, 0), (fusion_parameters_topic, 0)]  # default QoS 0
client.subscribe(subscribe_topics)
client.on_message = on_message
def publish(client, message):
global task_id
global fusionCode
max_retries = 3
    retry_delay = 1  # initial retry delay in seconds
def do_publish():
publish_topic = f"bridge/{fusionCode}/device_data/fusion/{task_id}"
try:
result = client.publish(publish_topic, message)
status = result.rc
if status == 0:
current_time = datetime.datetime.now()
formatted_time = current_time.strftime('%Y-%m-%d %H:%M:%S')
with open('log.txt', 'a') as log_file:
log_file.write('=====================\n')
log_file.write(f"Send message to topic {publish_topic}\n")
log_file.write(f"time: {formatted_time}\n")
log_file.write(f"{message}\n")
return True
else:
logging.error(f"Failed to send message to topic {publish_topic}, status: {status}")
return False
except Exception as e:
logging.error(f"Error publishing message: {str(e)}")
logging.error(f"Exception type: {type(e).__name__}")
logging.error(f"Stack trace:", exc_info=True)
return False
    # Retry with exponential backoff
    for attempt in range(max_retries):
        if do_publish():
            return
        if attempt < max_retries - 1:  # not the last attempt
            retry_delay *= 2  # exponential backoff
            logging.warning(f"Retrying publish in {retry_delay} seconds...")
            time.sleep(retry_delay)
logging.error(f"Failed to publish message after {max_retries} attempts")
def data_fusion(fusion_container):
    global data_pool
    data_list = []
    # Drain all pending data from the pool
    while not data_pool.empty():
        data_now = data_pool.get()
        processed_data = pipe.process_json_data(data_now[1])
        # Keep only meaningful data
        if processed_data and processed_data.get("objects"):  # only records that contain objects
            data_list.append(processed_data)
    if data_list:  # only log when there is data
        current_time = datetime.datetime.now()
        formatted_time = current_time.strftime('%Y-%m-%d %H:%M:%S')
        with open('Data_log.txt', 'a') as log_file:  # open the log file in append mode
            log_file.write('=====================\n')  # separator
            log_file.write(f"Get message \n")
            log_file.write(f"time: {formatted_time}\n")
            log_file.write(f"{data_list}\n")  # message content
    sensor_data = pipe.data_encoder(data_list)
    logging.info(sensor_data)
    filtered_results = fusion_container.run(sensor_data)
    processed_data = pipe.data_decoder(filtered_results)
    processed_data = json.dumps(processed_data, indent=4)
    return processed_data  # return the processed JSON string
def fusion_runner(client):
global run_parameter
pre_run_parameter = run_parameter
last_run_time = time.time()
last_health_check = time.time()
    health_check_interval = 30  # health check every 30 seconds
fusion_container = DataFusion(args.gate, args.interval)
def check_connection():
if not client.is_connected():
logging.warning("MQTT client disconnected during fusion_runner")
try:
client.reconnect()
logging.info("Successfully reconnected in fusion_runner")
return True
except Exception as e:
logging.error(f"Reconnection failed in fusion_runner: {str(e)}")
logging.error(f"Exception type: {type(e).__name__}")
logging.error(f"Stack trace:", exc_info=True)
return False
return True
while True:
try:
current_time = time.time()
            # Periodic health check
            if current_time - last_health_check >= health_check_interval:
                if not check_connection():
                    time.sleep(5)  # wait 5 seconds before retrying after a failed connection
                    continue
                last_health_check = current_time
            # Data processing and publishing
            if current_time - last_run_time >= interval:
                if not check_connection():
                    continue
                last_run_time = current_time
                if run_parameter != pre_run_parameter:
                    fusion_parms = pipe.extract_fusion_parms(run_parameter)
                    fusion_container.set_parameter(fusion_parms)
                    pre_run_parameter = run_parameter
processed_data = data_fusion(fusion_container)
if processed_data:
publish(client, processed_data)
except Exception as e:
logging.error(f"Error in fusion_runner: {str(e)}")
logging.error(f"Exception type: {type(e).__name__}")
logging.error(f"Stack trace:", exc_info=True)
time.sleep(1)
def run():
    # Initialize the logging system
    setup_logging()
    logging.info("Starting MQTT client application")
    while True:  # outer loop to recover from complete disconnects
try:
client = connect_mqtt()
subscribe(client)
logging.info("Starting fusion_runner thread")
fusion_runner_thread = threading.Thread(target=fusion_runner, args=(client,), daemon=True)
fusion_runner_thread.start()
logging.info("Starting MQTT loop")
client.loop_forever()
except Exception as e:
logging.critical(f"Critical error in main loop: {str(e)}")
logging.critical(f"Exception type: {type(e).__name__}")
logging.critical(f"Stack trace:", exc_info=True)
logging.info("Restarting in 5 seconds...")
time.sleep(5)
if __name__ == '__main__':
run()

View File

@@ -0,0 +1,145 @@
import json
import time
import random
from math import radians, degrees, sin, cos
from paho.mqtt import client as mqtt_client
import datetime
import numpy as np
from math import atan2, sqrt
# Coordinate conversion helper
def convert_to_cartesian(lat, lon, reference_point):
    """Convert latitude/longitude to Cartesian coordinates relative to a reference point, using the WGS84 ellipsoid."""
    # WGS84 ellipsoid parameters
    a = 6378137.0  # semi-major axis, in meters
    f = 1 / 298.257223563  # flattening
    e2 = 2 * f - f ** 2  # squared first eccentricity
    # Unpack the reference point
    ref_lat, ref_lon = reference_point
    # Convert to radians
    lat_rad = radians(lat)
    lon_rad = radians(lon)
    ref_lat_rad = radians(ref_lat)
    ref_lon_rad = radians(ref_lon)
    # Radius of curvature
    N = a / sqrt(1 - e2 * sin(ref_lat_rad) ** 2)  # prime-vertical radius of curvature at the reference point
    # Planar Cartesian coordinates relative to the reference point
    delta_lon = lon_rad - ref_lon_rad
    X = (N + 0) * cos(ref_lat_rad) * delta_lon
    Y = (a * (1 - e2)) / (1 - e2 * sin(ref_lat_rad) ** 2) * (lat_rad - ref_lat_rad)
    return X, Y
# Simulated-data generator
def generate_simulated_data(reference_point, radius_m, angle):
    """Generate simulated data in the shape Pipeline expects; radius_m is in meters."""
    R = 6371000  # Earth radius in meters
    # Convert the radius to radians
    radius = radius_m / R
    # Reference point latitude/longitude
    lat0, lon0 = reference_point
    # Compute the new point's latitude and longitude
    new_lat = lat0 + degrees(radius * cos(radians(angle)))
    new_lon = lon0 + degrees(radius * sin(radians(angle)) / cos(radians(lat0)))
    # Build the mock JSON payload
    mock_data = {
        "deviceId": "80103",
        "deviceType": 10,
        "objects": [
            {
                "altitude": 150.0,  # simulated altitude
                "extension": {
                    "traceId": "00000000000001876",
                    "channel": "5756500000",
                    "objectType": 30,
                    "uavId": "UAS123456",  # new field, consumed by Pipeline
                    "uavModel": "DJI Mini 3 Pro",  # simulated UAV model
                    "deviceId": "80103"  # source device ID
                },
                "height": 120.0,  # height
                "latitude": new_lat,
                "longitude": new_lon,
                "X": 0.0,  # placeholder, filled by the conversion below
                "Y": 0.0,  # placeholder, filled by the conversion below
                "speed": 15.0,  # simulated speed
                "objectId": "AX0009",  # simulated object ID
                "time": int(time.time() * 1000),  # current timestamp in ms
                "source": [["sensor1", "UAS123456"]]  # simulated source
            }
        ],
        "providerCode": "ZYLYTEST",
        "ptTime": int(time.time() * 1000)  # current timestamp in ms
    }
    # Convert coordinates
    for obj in mock_data["objects"]:
        lat, lon = obj["latitude"], obj["longitude"]
        obj["X"], obj["Y"] = convert_to_cartesian(lat, lon, reference_point)
    return json.dumps(mock_data, indent=4)
# MQTT publishing setup
broker = '192.168.36.234'
port = 37826
providerCode = "DP74b4ef9fb4aaf269"
deviceType = "5ga"
deviceId = "10580015"
topic = f"bridge/{providerCode}/device_data/{deviceType}/{deviceId}"
client_id = f'python-mqtt-{random.randint(0, 1000)}'
username = "cmlc"
password = "odD8#Ve7.B"
reference_point = (31.880000, 117.240000)  # (latitude, longitude)
radius = 1500  # radius in meters
def connect_mqtt():
"""连接 MQTT Broker"""
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Connected to MQTT Broker!")
else:
print(f"Failed to connect, return code {rc}")
client = mqtt_client.Client(client_id)
client.on_connect = on_connect
client.username_pw_set(username, password)
client.connect(broker, port)
return client
def publish(client):
"""推送生成的模拟数据"""
msg_count = 0
angle = 0
while True:
time.sleep(1)
msg = generate_simulated_data(reference_point, radius, angle)
result = client.publish(topic, msg)
status = result.rc
if status == 0:
print(f"Send `{msg_count}` to topic `{topic}`")
else:
print(f"Failed to send message to topic {topic}")
msg_count += 1
angle += 1
def run():
client = connect_mqtt()
client.loop_start()
publish(client)
if __name__ == '__main__':
run()

View File

@@ -0,0 +1,15 @@
# Build stage
FROM python:3.12.8-slim-bookworm AS builder
WORKDIR /build
COPY requirements.txt .
RUN pip install --user -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
# Runtime stage
FROM python:3.12.8-slim-bookworm
WORKDIR /app
COPY --from=builder /root/.local/lib/python3.12/site-packages /root/.local/lib/python3.12/site-packages
COPY . .
CMD ["python", "check.py"]

View File

@@ -0,0 +1,279 @@
import datetime
import numpy as np
from config import *
def calculate_euclidean_distances(A, B):
    # Euclidean distance between A and each row of B
    distances = np.linalg.norm(A - B, axis=1)
    # Find the minimum distance and its index
    min_distance_index = np.argmin(distances)
    min_distance = distances[min_distance_index]
    return min_distance, min_distance_index
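# Worked example (illustrative): with A = np.zeros(3) and
# B = np.array([[3, 4, 0], [6, 8, 0]]), the row distances are [5.0, 10.0],
# so the function returns (5.0, 0).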
def are_lists_equal(listA, listB):
    # Sort the sublists of both lists
    if len(listA) == 0:
        return False
    sorted_listA = sorted(listA, key=lambda x: (x[0], x[1]))
    sorted_listB = sorted(listB, key=lambda x: (x[0], x[1]))
    # Compare the sorted lists for equality
    return sorted_listA == sorted_listB
def sigmoid(x, a=10, b=0.1):
    # Shifted sigmoid: equals 0.5 + b at x = shift_value (from config)
    # a and b are shape parameters controlling steepness and offset
    return 1 / (1 + np.exp(-a * (x - shift_value))) + b
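# Illustrative values with the defaults from config.py (a=10, shift_value=1, b=0.1):
# sigmoid(0) ~= 0.10, sigmoid(1) = 0.60, sigmoid(2) ~= 1.10 -- note the +b offset
# means the survival score can exceed 1.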
class KalmanFilter:
def __init__(self, measurement, com_id, measurement_variance=1,process_variance=1e-1):
current_time = datetime.datetime.now()
timestamp = int(current_time.timestamp() * 1000000)
ms = measurement.tolist()
        self.m = np.array([ms[0], ms[1], ms[2], 0, 0, 0])  # 6-dimensional state [x, y, z, vx, vy, vz]
        self.origin = [com_id]  # origin: the strongest-response measurement
        self.source = self.origin  # source: all associated observations
        self.survive = np.array(survive_initial)  # initial survival score
        self.duration = 0
self.counter = 0
self.id = str(timestamp % 3600000000 + np.random.randint(1000))
self.F = [[1,0,0,1,0,0],
[0,1,0,0,1,0],
[0,0,1,0,0,1],
[0,0,0,1,0,0],
[0,0,0,0,1,0],
[0,0,0,0,0,1]]
self.F = np.array(self.F)
self.H = [[1,0,0,0,0,0],
[0,1,0,0,0,0],
[0,0,1,0,0,0]]
self.H = np.array(self.H)
self.R = measurement_variance * np.eye(3)
self.Q = process_variance * np.eye(6)
self.Q[3, 3] = self.Q[3, 3] * 1e-3
self.Q[4, 4] = self.Q[4, 4] * 1e-3
self.Q[5, 5] = self.Q[5, 5] * 1e-3
self.P = np.eye(6)*0.1
self.I = np.eye(6)
self.expend = 1
self.v = np.array([0,0,0])
self.born_time = int(current_time.timestamp() * 1000)
self.latest_update = self.born_time
self.m_history = self.m
self.s_history = []
self.origin_set = [self.origin]
    def predict(self):
        F = self.F
        self.m = np.dot(F, self.m.T)  # simple one-step prediction
        self.m = self.m.T
        self.P = np.dot(np.dot(F, self.P), F.T) + self.Q
        self.survive = self.survive * decay  # apply the survival decay
        self.origin_set = np.unique(np.array(self.origin_set), axis=0).tolist()  # deduplicate the association set
    def update(self, res, run_timestamp, gate):
        self.duration += 0.6  # each update adds 0.6 to the track duration
        if len(res['distances']) == 0:
            mmd = 1e8
        else:
            min_distance_index = np.argmin(res['distances'])
            mmd = res['distances'][min_distance_index]
            measurement = res['measurements'][min_distance_index]
        # Perform the measurement update
        if mmd < gate * self.expend:
            H = self.H
            I = self.I
            self.expend = max(self.expend * 0.8, 1)
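            # Standard Kalman measurement update, implemented by the three lines below:
            #   K = P H^T (H P H^T + R)^-1
            #   m <- m + K (z - H m)
            #   P <- (I - K H) P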
kalman_gain = np.dot(np.dot(self.P,H.T),np.linalg.pinv(np.dot(np.dot(H,self.P),H.T)+self.R))
self.m += np.dot(kalman_gain,(measurement.T - np.dot(H,self.m.T)))
self.m = self.m.T
self.P = np.dot((I - np.dot(kalman_gain,H)),self.P)
self.origin = [res['key_ids'][min_distance_index]]
self.counter += 1
            self.survive = sigmoid(self.counter)  # new survival mapping
            # Prevent overconfidence in the velocity estimate
            self.P[3, 3] = max(1e-1, self.P[3, 3])
            self.P[4, 4] = max(1e-1, self.P[4, 4])
            self.P[5, 5] = max(1e-1, self.P[5, 5])
            # Extract the velocity
            self.v = self.m[3:6]
            self.origin_set.append(self.origin)
            self.latest_update = run_timestamp  # record the update time
        else:
            self.expend = min(self.expend * 1.2, 1.5)  # if no association, widen the gate and keep searching
            self.P[3, 3] = min(self.P[3, 3] * 1.1, 1)
            self.P[4, 4] = min(self.P[4, 4] * 1.1, 1)
            self.P[5, 5] = min(self.P[5, 5] * 1.1, 1)
            self.counter -= 1
            self.counter = max(self.counter, 0)
self.m_history = np.vstack((self.m_history, self.m))
self.s_history.append(self.survive)
def one_correlation(self, data_matrix, id_list):
        # Distance between the current state estimate and the rows of data_matrix
min_distance, min_index = calculate_euclidean_distances(self.m[0:3], data_matrix)
m_id = id_list[min_index]
measurement = data_matrix[min_index, :]
return m_id, min_distance, measurement
def correlation(self, sensor_data):
        # Iterate over the sensors
res = {'m_ids':[], 'distances':[], 'measurements':[], 'key_ids':[]}
for value in sensor_data:
if len(value['id_list']) > 0:
m_id, min_distance, measurement = self.one_correlation(value['data_matrix'], value['id_list'])
key = value['deviceId']
res['m_ids'].append(m_id)
res['measurements'].append(measurement)
res['key_ids'].append([key, m_id])
                # Give previously associated targets higher confidence (smaller effective distance)
                if [key, m_id] in self.origin_set:
                    min_distance = min_distance * 0.2
res['distances'].append(min_distance)
return res
# Fusion container class
class DataFusion:
def __init__(self,gate=25,interval = 1,fusion_type = 1,
measuremrnt_variance=1,process_variance =1e-1):
"""
初始化DataFusion类。
"""
# self.task_id = task_id
self.interval = interval
self.gate = gate
self.targets = []
self.fusion_type = fusion_type
self.existence_thres = 0.01
self.show_thres = show_thres
self.process_variance = process_variance
self.measuremrnt_variance = measuremrnt_variance
def set_parameter(self,fusion_parms):
print("GO!!!!!!!!!")
print(fusion_parms)
def obtain_priority(self,sensor_data):
self.priority_dict = dict()
for data in sensor_data:
if data.get('priority'):
self.priority_dict[data['deviceId']] = data['priority']
else:
self.priority_dict[data['deviceId']] = 1
def out_transformer(self,target):
out_former = {
'objectId': target.id,
'survive': target.survive.tolist(),
'state': target.m.tolist(),
'speed': np.linalg.norm(target.v).tolist() / self.interval,
'source': target.source,
'sigma': np.diag(target.P).tolist(),
'X': target.m[0].tolist(),
'Y': target.m[1].tolist(),
'Z': target.m[2].tolist(),
'Vx': target.v[0].tolist(),
'Vy': target.v[1].tolist(),
'Vz': target.v[2].tolist(),
'born_time': str(target.born_time)
}
return out_former
def run(self, sensor_data):
current_time = datetime.datetime.now()
run_timestamp = int(current_time.timestamp() * 1000)
fusion_data = []
selected_list = []
self.obtain_priority(sensor_data)
        # Iterate over all known targets
for target in self.targets:
print(f"Fusion target id:{target.id} with survive: {target.survive} at :{target.m}\n")
if target.survive < self.existence_thres:
continue
target.predict()
res = target.correlation(sensor_data)
target.update(res,run_timestamp,self.gate)
# ==================================================
now_id = []
t_sum = 0
for r, distance in enumerate(res['distances']):
if distance < self.gate:
now_id.append(res['key_ids'][r])
selected_list.append(res['key_ids'][r])
D_Id = res['key_ids'][r][0]
t_sum += self.priority_dict[D_Id]
target.source = now_id
# ==================================================
if self.fusion_type == 2 and t_sum < 2:
target.survive = target.survive * 0.5
out_former = self.out_transformer(target)
            if target.survive > self.show_thres:  # only output targets whose survival exceeds show_thres
fusion_data.append(out_former)
        # Filter measurements according to the association results
self.selected_list = selected_list
for data in sensor_data:
self.new_born(data)
self.remove_duplicates()
# ==================================================
self.fusion_process_log(fusion_data)
return fusion_data
    def new_born(self, value):
for j, id in enumerate(value['id_list']):
key = value['deviceId']
if [key, id] not in self.selected_list:
if self.fusion_type == 3:
if value['priority'] > 50:
self.targets.append(KalmanFilter(value['data_matrix'][j, :], [key, id],self.measuremrnt_variance,self.process_variance))
else:
self.targets.append(KalmanFilter(value['data_matrix'][j, :], [key, id],self.measuremrnt_variance,self.process_variance))
                self.selected_list.append([key, id])  # add the newly created target to the selected set
    def remove_duplicates(self):
        # Collect the IDs of targets scheduled for deletion
        to_delete = []
        # Iterate over all targets
        for i in range(len(self.targets)):
            if self.targets[i].survive < self.existence_thres:
                to_delete.append(self.targets[i].id)
                continue
            if self.targets[i].survive < self.show_thres:
                continue
            for j in range(i + 1, len(self.targets)):
                # Compare whether the two source lists are identical
                if are_lists_equal(self.targets[i].source, self.targets[j].source):
                    # If identical, drop the shorter-lived target
                    if self.targets[i].duration < self.targets[j].duration:
                        to_delete.append(self.targets[i].id)
                    else:
                        to_delete.append(self.targets[j].id)
        # Delete by ID to keep target management simple
        for item_id in sorted(to_delete, reverse=True):
for target in self.targets:
if target.id == item_id:
self.targets.remove(target)
break
def fusion_process_log(self,fusion_data):
        current_time = datetime.datetime.now()
        # Format the time as YYYY-MM-DD HH:MM:SS
        formatted_time = current_time.strftime('%Y-%m-%d %H:%M:%S')
        with open('process_log.txt', 'a') as log_file:  # open the log file in append mode
            log_file.write('=====================\n')  # separator
            log_file.write(f"time: {formatted_time}\n")  # timestamp
            log_file.write(f"data:\n {fusion_data}\n")  # message content

View File

@@ -0,0 +1,53 @@
from KF_V2 import *
# ======================
sensor_id_list = ['AUV01','AUV02']
sensor_data = []
sensor_data.append({
'data_matrix': np.array([[0, 0, 0], [100, 100, 100]]),
'id_list': ['001','002'],
'deviceId': 'AUV01',
    'devicePs': [0.2],  # the first value is the measurement error
'latest_time': [0],
'priority':1
})
sensor_data.append({
'data_matrix': np.array([[0, 0, 0], [100, 100, 100]]),
'id_list': ['003','004'],
'deviceId': 'AUV02',
'deivceProperties':[0.2],
'latest_time': [0],
'priority':100
})
fusion_container = DataFusion(25,1,3)
for i in range(15):
print(i)
    # Modify data_matrix in sensor_data at the start of each iteration
    if i % 5 == 0:
        temp = {
            'data_matrix': np.array([]),
            'id_list': [],
            'deviceId': 'AUV01',
            'devicePs': [0.2],  # the first value is the measurement error
'latest_time': [0]
}
c_sensor_data = []
c_sensor_data.append(temp)
c_sensor_data.append(temp)
filted_results = fusion_container.run(c_sensor_data)
else:
        sensor_data[0]['data_matrix'][0, :] += 1  # add 1 to every element of the first row
        sensor_data[0]['data_matrix'][1, :] -= 1  # subtract 1 from every element of the second row
        sensor_data[1]['data_matrix'][0, :] += 1  # add 1 to every element of the first row
        sensor_data[1]['data_matrix'][1, :] -= 1  # subtract 1 from every element of the second row
filted_results = fusion_container.run(sensor_data)
print("results:\n")
for d in filted_results:
print(d)

View File

@@ -0,0 +1,142 @@
import numpy as np
from scipy import signal
class AoAConverter:
def __init__(self):
self.p = [1e8, 1e8, 1e8]
    def to_cartesian(self, theta_rad, phi_rad):
        """Convert spherical coordinates (radians) to a Cartesian unit vector."""
        # NOTE: the inputs are already in radians, so no degree conversion is applied:
        # theta_rad = np.radians(theta)
        # phi_rad = np.radians(phi)
        x = np.sin(theta_rad) * np.cos(phi_rad)
        y = np.sin(theta_rad) * np.sin(phi_rad)
        z = np.cos(theta_rad)
        pc = np.array([x, y, z])
        return pc
    def calc_error(self, pc, mc):
        # Squared differences between predicted and observed coordinates
        mc = np.expand_dims(mc, axis=1)
        diff_squared = (pc - mc) ** 2
        # Sum the squared differences to get the squared error
        error_squared = np.sum(diff_squared, axis=0)
        # The square root gives the error
        return np.sqrt(error_squared)
    def find_best_r(self, theta, phi, mc, r_range):
        """Search the given range for the optimal r value."""
        # Convert r_range to a NumPy array for vectorized operations
        r_values = np.array(r_range)
        # Unit direction vector for the angles
        pc = self.to_cartesian(theta, phi)
        # Expand dimensions for the outer product
        r_values = np.expand_dims(r_values, axis=0)
        pc = np.expand_dims(pc, axis=1)
        # Candidate positions and their errors for every r value
        # print([pc.shape, r_values.shape])
        D = np.dot(pc, r_values)
        errors = self.calc_error(D, mc)
        r_values = np.squeeze(r_values)
        # Find the minimum error and its corresponding r value
        min_error = np.min(errors)
        best_r = r_values[np.argmin(errors)]  # squeeze removed the extra dimension, so argmin indexes directly
        return [best_r, min_error]
def projected_measure(self,theta, phi, r,p0):
pc = self.to_cartesian(theta, phi)
neo_p = r*pc + p0
return np.array(neo_p)
converter = AoAConverter()
def calculate_euclidean_distances(A, BX):
    # Euclidean distance between A and the rows of B (with AoA projection if needed)
B = BX['data_matrix']
N = B.shape[0]
r_range = np.linspace(-5, 5, 100)
    if BX.get('AOA_pos'):
        # Data from an AoA sensor: project the angles onto Cartesian positions first
sensor_pos = BX.get('AOA_pos')
ob_pos = A - sensor_pos
r0 = np.linalg.norm(ob_pos)
B_new = []
for i in range(N):
theta = B[i,0]
phi = B[i,1]
[best_r,min_error] = converter.find_best_r(theta, phi,ob_pos, r0+r_range)
print(min_error)
B_new.append(converter.projected_measure(theta, phi,best_r,sensor_pos))
B_new = np.array(B_new)
else:
B_new = B
distances = np.linalg.norm(A - B_new, axis=1)
    # Find the minimum distance and its index
min_distance_index = np.argmin(distances)
min_distance = distances[min_distance_index]
return [min_distance, min_distance_index, B_new]
def are_lists_equal(listA, listB):
    # Sort the sublists of both lists
    if len(listA) == 0:
        return False
    sorted_listA = sorted(listA, key=lambda x: (x[0], x[1]))
    sorted_listB = sorted(listB, key=lambda x: (x[0], x[1]))
    # Compare the sorted lists for equality
    return sorted_listA == sorted_listB
def sigmoid(x, a=10, b=0.1):
    # Shifted sigmoid: equals 0.5 + b at x = 1
    # a and b are shape parameters controlling steepness and offset
    return 1 / (1 + np.exp(-a * (x - 1))) + b
def calculate_correlation(A, B):
    """
    Compute the maximum correlation over all columns of two array matrices.
    Arguments:
    A -- the first NumPy array
    B -- the second NumPy array
    """
A = np.exp(-1j*A/50)
B = np.exp(1j*B/50)
corr_res = []
for col in range(3):
a = A[:, col]
b = B[:, col]
convolution = signal.convolve(a, b[::-1])
corr_res.append(convolution)
max_corr = np.sum(np.abs(np.array(corr_res)),0)
max_corr = np.max(max_corr)/3
return max_corr
def calculate_history_distances(target, b):
    # Combine backward history with a forward prediction
A = target.m_history
v = target.v
    # L2 norm (Euclidean distance) between each row and vector b
if A.shape[0] < 10:
return np.inf
local_time = np.linspace(0, 10, 20)
local_time = np.expand_dims(local_time, axis=1)
v = np.expand_dims(v, axis=1)
A_pre = A[-10:,0:3]
A_post = np.dot(local_time,v.T)
A_all = np.vstack((A_pre, A_post))
distances = np.linalg.norm(A_all - b, axis=1)
    # Find the minimum distance
min_distance = np.min(distances)
return min_distance

View File

@@ -0,0 +1,26 @@
#!/bin/bash
# Usage: pick a suitable directory on the host, upload the latest project code,
# place this script in the project directory, and adjust the parameters below.
if [[ $# -eq 0 ]]; then
echo "tag version is null!"
exit 233
fi
tag_version=$1
echo "start to build docker image tag is => ${tag_version}"
docker build -t harbor.cdcyy.com.cn/cmii/cmii-uavms-pyfusion:${tag_version} .
echo ""
echo "login to docker hub"
docker login -u rad02_drone -p Drone@1234 harbor.cdcyy.com.cn
echo ""
echo "start to push image to hub!"
docker push harbor.cdcyy.com.cn/cmii/cmii-uavms-pyfusion:${tag_version}
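
# Illustrative invocation (script name assumed):
#   ./build.sh v1.0.3
# builds harbor.cdcyy.com.cn/cmii/cmii-uavms-pyfusion:v1.0.3 and pushes it.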

View File

@@ -0,0 +1,374 @@
import os
import subprocess
import paho.mqtt.client as mqtt
import json
import time
import threading
import logging
from config import *
import datetime
import schedule  # install first: pip install schedule
import yaml
# Load the YAML configuration
def load_mqtt_config():
config_path = os.getenv('CONFIG_PATH', 'config.yaml')
with open(config_path, 'r') as f:
config = yaml.safe_load(f)
return config['mqtt'], config['topics']
# Fetch the MQTT and topics configuration
mqtt_config, topics_config = load_mqtt_config()
# Logging configuration
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler('check.log'),
logging.StreamHandler()
]
)
# Running tasks and their configurations
running_tasks = {}
task_configs = {}
# Command template for launching Dev_Fusion.py
fusion_command_template = f"nohup python Dev_Fusion.py -t {{task_id}} -g {DEV_FUSION_G} -i {DEV_FUSION_I} > /dev/null 2> error.log &"
# Log folder path
log_folder = "tasklog"
os.makedirs(log_folder, exist_ok=True)
# Global lock protecting shared task state
task_lock = threading.Lock()
def compare_configs(old_config, new_config):
    """
    Compare two configurations for substantive differences.
    Returns True if there are differences that require a restart,
    False if the message only needs to be forwarded.
    """
    try:
        # 1. Check the devices list
        old_devices = old_config.get('devices', [])
        new_devices = new_config.get('devices', [])
        if len(old_devices) != len(new_devices):
            return True
        # Build a tuple of key fields per device for comparison
        def get_device_key(device):
            return (
                device.get('device_id'),
                device.get('device_topic'),
                device.get('device_type'),
                device.get('reference_point')
            )
        old_device_keys = {get_device_key(d) for d in old_devices}
        new_device_keys = {get_device_key(d) for d in new_devices}
        # Restart if any device's key fields changed
        if old_device_keys != new_device_keys:
            return True
        # 2. Check the reference point
        old_ref = old_config.get('reference_point')
        new_ref = new_config.get('reference_point')
        if old_ref != new_ref:
            return True
        # 3. Changes to other parameters (e.g. sampling_rate) do not require a restart
        logging.info("No critical configuration changes detected")
        return False
    except Exception as e:
        logging.error(f"Error comparing configs: {str(e)}")
        return True  # treat errors as differences and restart, to be safe
def stop_task(task_id):
    """Stop the running instance for the given task."""
    try:
        if task_id in running_tasks:
            process = running_tasks[task_id]
            # Terminate the matching Python process via pkill
            subprocess.run(f"pkill -f 'python.*Dev_Fusion.py.*-t {task_id}'", shell=True)
            process.wait(timeout=5)  # wait for the process to exit
            del running_tasks[task_id]
            del task_configs[task_id]
            logging.info(f"Task {task_id} stopped successfully")
except Exception as e:
logging.error(f"Error stopping task {task_id}: {str(e)}")
# Per-task worker, executed in its own thread
def handle_task(client, task_id, payload):
    try:
        with task_lock:  # protect shared state with the lock
            data = json.loads(payload)
            sensor_topic = topics_config['sensor_topic'].replace("+", task_id)
            # Record the configuration update
            log_file = os.path.join(log_folder, f"received_tasklog_{task_id}.txt")
            current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            def log_config_update(action):
                with open(log_file, "a") as f:
                    f.write(f"\n=== Configuration Update at {current_time} ===\n")
                    f.write(f"Task ID: {task_id}\n")
                    f.write(f"MQTT_TOPIC: {topics_config['mqtt_topic']}\n")
                    f.write(f"Payload: {payload}\n")
                    f.write(f"Action: {action}\n")
                    f.write("=" * 50 + "\n")
            # Is the task already running?
            if task_id in running_tasks:
                # Do we have a stored configuration?
                if task_id in task_configs:
                    # Compare the old and new configurations
                    if compare_configs(task_configs[task_id], data):
                        logging.info(f"Configuration changed for task {task_id}, restarting...")
                        stop_task(task_id)
                        log_config_update("Configuration changed, restarting instance")
                        start_new_instance(client, task_id, payload, data)
                    else:
                        # No configuration change: just forward the message
                        logging.info(f"No configuration change for task {task_id}, forwarding message")
                        log_config_update("Message forwarded (no critical changes)")
                        client.publish(sensor_topic, payload)
                else:
                    # No stored configuration: store the new one and forward
                    logging.info(f"No stored config for task {task_id}, storing first config")
                    task_configs[task_id] = data
                    log_config_update("First config stored and forwarded")
                    client.publish(sensor_topic, payload)
            else:
                # Task not running: start a new instance
                log_config_update("New instance started")
                start_new_instance(client, task_id, payload, data)
except Exception as e:
logging.error(f"Error handling task {task_id}: {str(e)}")
def start_new_instance(client, task_id, payload, config):
    """Start a new Dev_Fusion instance."""
    try:
        # Launch the Dev_Fusion.py instance
        fusion_command = fusion_command_template.format(task_id=task_id)
        process = subprocess.Popen(fusion_command, shell=True)
        running_tasks[task_id] = process
        task_configs[task_id] = config
        logging.info(f"Dev_Fusion.py started successfully for Task ID {task_id}")
        # Append to the task log
        log_file = os.path.join(log_folder, f"received_tasklog_{task_id}.txt")
        current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        with open(log_file, "a") as f:  # "a" appends to the file
            f.write(f"\n=== Configuration Update at {current_time} ===\n")
            f.write(f"Task ID: {task_id}\n")
            f.write(f"MQTT_TOPIC: {topics_config['mqtt_topic']}\n")
            f.write(f"Payload: {payload}\n")
            # Record the action taken
            f.write("Action: New instance started\n")
            f.write("=" * 50 + "\n")
        # Give the instance a moment to start
        time.sleep(0.5)
        # Send the configuration
        sensor_topic = topics_config['sensor_topic'].replace("+", task_id)
        client.publish(sensor_topic, payload)
        logging.info(f"Configuration sent to {sensor_topic}")
except Exception as e:
logging.error(f"Error starting new instance for task {task_id}: {str(e)}")
if task_id in running_tasks:
del running_tasks[task_id]
del task_configs[task_id]
# MQTT callbacks
def on_connect(client, userdata, flags, rc):
if rc == 0:
logging.info("Connected to MQTT broker")
        client.subscribe(topics_config['mqtt_topic'])  # topic from the YAML config
else:
logging.error(f"Connection failed with code {rc}: {DISCONNECT_REASONS.get(rc, 'Unknown error')}")
def on_message(client, userdata, msg):
try:
payload = msg.payload.decode("utf-8")
logging.info(f"Received message on topic {msg.topic}")
data = json.loads(payload)
task_id = data.get("task_id")
if task_id:
thread = threading.Thread(target=handle_task, args=(client, task_id, payload))
thread.start()
else:
logging.warning("Received message without task_id")
except json.JSONDecodeError:
logging.error("Received message is not valid JSON")
except Exception as e:
logging.error(f"Error processing message: {str(e)}")
def check_running_instances():
    """Detect Dev_Fusion instances already running on the system."""
    try:
        # Find running Dev_Fusion.py instances with ps
        result = subprocess.run("ps aux | grep 'python.*Dev_Fusion.py' | grep -v grep",
                                shell=True, capture_output=True, text=True)
        found_instances = []
        for line in result.stdout.splitlines():
            # Extract the task_id from the command-line arguments
            if '-t' in line:
                parts = line.split()
                for i, part in enumerate(parts):
                    if part == '-t' and i + 1 < len(parts):
                        task_id = parts[i + 1]
                        pid = parts[1]  # the PID is usually in the second column
                        found_instances.append((task_id, pid))
        for task_id, pid in found_instances:
            logging.info(f"Found running instance for task {task_id}, pid: {pid}")
            # Read the task's latest configuration
            config = read_latest_config(task_id)
            if config:
                # Register the already-running instance in running_tasks
                running_tasks[task_id] = subprocess.Popen(['echo', ''], stdout=subprocess.PIPE)
                running_tasks[task_id].pid = int(pid)
                task_configs[task_id] = config
                logging.info(
                    f"Successfully loaded config for task {task_id} from tasklog/received_tasklog_{task_id}.txt")
            else:
                logging.warning(f"No valid config found for task {task_id}, stopping instance...")
                subprocess.run(f"pkill -f 'python.*Dev_Fusion.py.*-t {task_id}'", shell=True)
                logging.info(f"Stopped instance {task_id} due to missing config")
        logging.info(f"Finished checking instances. Loaded {len(running_tasks)} tasks with valid configs")
except Exception as e:
logging.error(f"Error checking running instances: {str(e)}")
def read_latest_config(task_id):
    """Read the latest configuration for the given task."""
    try:
        log_file = os.path.join(log_folder, f"received_tasklog_{task_id}.txt")
        if not os.path.exists(log_file):
            logging.error(f"No log file found for task {task_id}")
            return None
        with open(log_file, 'r') as f:
            content = f.read()
        # Split by configuration-update blocks
        updates = content.split('=== Configuration Update at')
        if not updates:
            return None
        # Take the last update block
        latest_update = updates[-1]
        # Extract the payload
        payload_start = latest_update.find('Payload: ') + len('Payload: ')
        payload_end = latest_update.find('\nAction:')
        if payload_end == -1:  # no Action line
            payload_end = latest_update.find('\n===')
        if payload_start > 0 and payload_end > payload_start:
            payload = latest_update[payload_start:payload_end].strip()
            return json.loads(payload)
        return None
except Exception as e:
logging.error(f"Error reading latest config for task {task_id}: {str(e)}")
return None
def restart_all_instances():
    """Restart all running instances."""
    logging.info("Scheduled restart: Beginning restart of all instances")
    # Copy the current task list, since running_tasks is modified below
    tasks_to_restart = list(running_tasks.keys())
    for task_id in tasks_to_restart:
        try:
            # Read the latest configuration
            config = read_latest_config(task_id)
            if not config:
                logging.error(f"Could not find latest config for task {task_id}, skipping restart")
                continue
            # Stop the current instance
            logging.info(f"Stopping task {task_id} for scheduled restart")
            stop_task(task_id)
            # Serialize the configuration back to JSON
            payload = json.dumps(config)
            # Start a new instance
            logging.info(f"Starting new instance for task {task_id} with latest config")
            start_new_instance(mqtt_client, task_id, payload, config)
except Exception as e:
logging.error(f"Error restarting task {task_id}: {str(e)}")
def setup_scheduled_restart(restart_time="03:00"):
    """Schedule the daily restart job."""
    schedule.every().day.at(restart_time).do(restart_all_instances)
    def run_schedule():
        while True:
            schedule.run_pending()
            time.sleep(30)  # check every 30 seconds
    # Start the scheduler thread
    scheduler_thread = threading.Thread(target=run_schedule, daemon=True)
    scheduler_thread.start()
def main():
    global mqtt_client  # global so restart_all_instances can reuse the client
    # Detect already-running instances at startup
    check_running_instances()
    # Create the MQTT client
    mqtt_client = mqtt.Client()
    mqtt_client.on_connect = on_connect
    mqtt_client.on_message = on_message
    mqtt_client.username_pw_set(mqtt_config['username'], mqtt_config['password'])
    # Schedule the daily restart (03:00 by default)
    setup_scheduled_restart()
while True:
try:
mqtt_client.connect(mqtt_config['broker'], mqtt_config['port'], 60)
mqtt_client.loop_forever()
except Exception as e:
logging.error(f"MQTT connection error: {str(e)}")
time.sleep(5)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,10 @@
mqtt:
broker: "192.168.35.178"
port: 31884
username: "cmlc"
password: "4YPk*DS%+5"
topics:
mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"

View File

@@ -0,0 +1,67 @@
# # MQTT configuration
# broker = "192.168.35.178"  # broker address
# port = 31883  # port
# username = "cmlc"
# password = "odD8#Ve7.B"
#
# # Topic used by check.py
# MQTT_TOPIC = "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
#
# # Topic used by Dev_Fusion.py
# SENSOR_TOPIC = "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
# Parameters used by check.py when building the Dev_Fusion.py launch command
DEV_FUSION_G = 40  # -g argument (gate)
DEV_FUSION_I = 0.6  # -i argument (interval)
# KF_V2 settings
shift_value = 1
survive_initial = 0.25
decay = 0.7
show_thres = 0.4
reference_point = (104.08, 30.51)
# Logging: MQTT disconnect reason codes
DISCONNECT_REASONS = {
    0: "Normal disconnect",
    1: "Protocol version mismatch",
    2: "Invalid client identifier",
    3: "Server unavailable",
    4: "Bad username or password",
    5: "Not authorized",
    6: "Broker unavailable",
    7: "TLS error",
    8: "QoS not supported",
    9: "Client banned",
    10: "Server busy",
    11: "Client banned (certificate-related)",
    128: "Unspecified error",
    129: "Malformed packet",
    130: "Protocol error",
    131: "Communication error",
    132: "Server keepalive timeout",
    133: "Server internal error",
    134: "Server shutting down",
    135: "Server out of resources",
    136: "Client network socket error",
    137: "Server closing the connection",
    138: "Server refused the connection",
    139: "Version not supported by the server",
    140: "Client ID already in use",
    141: "Connection rate limit exceeded",
    142: "Maximum connection count exceeded",
    143: "Keepalive timeout",
    144: "Session taken over",
    145: "Connection lost",
    146: "Invalid topic alias",
    147: "Packet too large",
    148: "Message rate too high",
    149: "Quota exceeded",
    150: "Administrative action",
    151: "Invalid payload format",
    152: "Retain not supported",
    153: "QoS not supported",
    154: "Use another server",
    155: "Server has moved",
    156: "Connection not supported",
}

View File

@@ -0,0 +1,10 @@
mqtt:
broker: "192.168.35.178"
port: 31883
username: "cmlc"
password: "odD8#Ve7.B"
topics:
mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"

View File

@@ -0,0 +1,19 @@
try {
$ErrorActionPreference = "Stop"
Write-Host "1. Uploading binary exec..." -ForegroundColor Green
ssh root@192.168.35.71 "mkdir -p /root/wdd/ranjing-python-devfusion/"
scp C:\Users\wdd\IdeaProjects\ProjectOctopus\agent-common\SplitProject\ranjing-python-devfusion\* root@192.168.35.71:/root/wdd/ranjing-python-devfusion/
Write-Host "2. Exec the command ..." -ForegroundColor Blue
Write-Host ""
Write-Host ""
ssh root@192.168.35.71 "cd /root/wdd/ranjing-python-devfusion/ && docker build -t ranjing/dev-fusion:v1.0 ."
Write-Host ""
Write-Host ""
} catch {
Write-Host "操作失败: $_" -ForegroundColor Red
exit 1
}

View File

@@ -0,0 +1,8 @@
#!/bin/bash
docker run --name devfusion \
-d \
--rm \
-v /root/wdd/ranjing-python-devfusion/config-dev.yaml:/dev-fusion/config.yaml \
harbor.cdcdyy.com.cn/cmii/cmii-uavms-pyfusion:6.2.0

View File

@@ -0,0 +1,62 @@
from math import radians, degrees, sin, cos, atan2, sqrt
def convert_to_cartesian(lat, lon, reference_point):
    """Convert latitude/longitude to Cartesian coordinates relative to a reference point, using the WGS84 ellipsoid."""
    # WGS84 ellipsoid parameters
    a = 6378137.0  # semi-major axis, in meters
    f = 1 / 298.257223563  # flattening
    e2 = 2 * f - f ** 2  # squared first eccentricity
    # Unpack the reference point
    ref_lat, ref_lon = reference_point
    # Convert to radians
    lat_rad = radians(lat)
    lon_rad = radians(lon)
    ref_lat_rad = radians(ref_lat)
    ref_lon_rad = radians(ref_lon)
    # Radius of curvature
    N = a / sqrt(1 - e2 * sin(ref_lat_rad) ** 2)  # prime-vertical radius of curvature at the reference point
    # Planar Cartesian coordinates relative to the reference point
    delta_lon = lon_rad - ref_lon_rad
    X = (N + 0) * cos(ref_lat_rad) * delta_lon
    Y = (a * (1 - e2)) / (1 - e2 * sin(ref_lat_rad) ** 2) * (lat_rad - ref_lat_rad)
return X, Y
def convert_to_geodetic(x, y, reference_point):
    """Convert Cartesian coordinates back to latitude/longitude using the WGS84 ellipsoid."""
    # WGS84 ellipsoid parameters
    a = 6378137.0  # semi-major axis, in meters
    f = 1 / 298.257223563  # flattening
    e2 = 2 * f - f ** 2  # squared first eccentricity
    # Unpack the reference point
    ref_lat, ref_lon = reference_point
    # Convert to radians
    ref_lat_rad = radians(ref_lat)
    ref_lon_rad = radians(ref_lon)
    # Radius of curvature
    N = a / sqrt(1 - e2 * sin(ref_lat_rad) ** 2)  # prime-vertical radius of curvature at the reference point
    # Latitude
    lat_rad = y * (1 - e2 * sin(ref_lat_rad) ** 2) / (a * (1 - e2)) + ref_lat_rad
    # Longitude
    if cos(ref_lat_rad) == 0:
        lon_rad = 0
    else:
        lon_rad = x / ((N + 0) * cos(ref_lat_rad)) + ref_lon_rad
    # Convert back to degrees
    lat = degrees(lat_rad)
    lon = degrees(lon_rad)
return lat, lon
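
# Illustrative round trip (values assumed): with ref = (30.51, 104.08),
#   x, y = convert_to_cartesian(30.52, 104.09, ref)
#   lat, lon = convert_to_geodetic(x, y, ref)
# lat/lon should recover approximately (30.52, 104.09), since both functions
# linearize around the same reference point.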

View File

@@ -0,0 +1,423 @@
import datetime
from transformation import *
import json
import numpy as np
class Pipeline:
def __init__(self, fusion_parameters_topic,reference_point):
self.fusion_parameters_topic = fusion_parameters_topic
self.task_id = '554343465692430336'
self.reference_point = reference_point
# self.deviceId = deviceId
self.sensor_id_list = ["10000000000"]
self.fusionCode = 'DPZYLY'
self.publish_topic = f"bridge/{self.fusionCode}/device_data/fusion/{self.task_id}"
self.priority_dict = {"10000000000":1}
self.uavInfo_bucket = dict()
self.target_bowl = dict()
self.device_info_dict = dict()
self.device_type_mapping = {
"5ga": 0,
"radar": 1,
"spec": 2,
"oe": 3,
"cm": 4,
"dec": 5,
"ifr": 6,
"cv": 7,
"isrs": 8,
"aoa": 9,
"tdoa": 10,
"dcd": 11,
"direct": 100,
"rtk": 101,
"rid": 102,
"fusion": 1000,
"other": 999 # 假设 'other' 对应于未知设备类型
}
self.device_type_speedrank = {
"radar": 1,
"spec": 2,
"oe": 3,
"cm": 4,
"dec": 5,
"ifr": 6,
"cv": 7,
"isrs": 8,
"aoa": 9,
"tdoa": 10,
"dcd": 13,
"direct": 12,
"5ga": 11,
"rid": 14,
"rtk": 15,
"other": 0 # 假设 'other' 对应于未知设备类型
}
    def process_json_data(self, json_data):
        """
        Parse JSON data into a dict and add X and Y attributes.
        """
        data_dict = json.loads(json_data)
        # Safely access the 'ptTime' key
        pt_time = data_dict.get('ptTime')
        if pt_time is not None:
            print(pt_time)
        else:
            print("Key 'ptTime' not found in data_dict.")
        # Safely access the 'objects' key
        objects = data_dict.get('objects')
        if objects is None:
            print("Key 'objects' not found in data_dict.")
            return data_dict  # if 'objects' is missing, return the original dict unchanged
        # Otherwise continue processing
        for record in objects:
            # Check that 'latitude' and 'longitude' exist in the record
            if 'latitude' in record and 'longitude' in record:
                lat = record['latitude']
                lon = record['longitude']
                X, Y = convert_to_cartesian(lat, lon, self.reference_point)
                record['X'] = X
                record['Y'] = Y
            else:
                print("Record is missing 'latitude' or 'longitude' keys.")
        return data_dict
    def data_encoder(self, data_list):
        """
        Build the per-sensor data matrix and ID list.
        """
        sensor_data = []
        for sensor_id in self.sensor_id_list:
            temp = {'data_matrix': [],
                    'id_list': [],
                    'deviceId': sensor_id,
                    'latest_time': [],
                    'priority': 1}
for record in data_list:
if record.get('noteData'):
obj = record['noteData']
obj['objectId'] = obj['uasId']
obj['deviceId'] = obj["extension"]['deviceId']
record['objects'] = [obj]
if record['deviceId'] == sensor_id:
temp['priority'] = self.priority_dict[sensor_id]
if record.get('objects'):
for obj in record['objects']:
if obj['objectId'] in temp['id_list']:
position = temp['id_list'].index(obj['objectId'])
if int(record['ptTime']) > int(temp['latest_time'][position]):
temp['data_matrix'][position] = [obj['X'], obj['Y'], obj['altitude']]
else:
temp['data_matrix'].append([obj['X'], obj['Y'], obj['altitude']])
temp['id_list'].append(obj['objectId'])
temp['latest_time'].append(record['ptTime'])
                            # Store the extension fields
                            if obj.get('extension'):
                                B_id = str(record['deviceId']) + str(obj['objectId'])
                                self.uavInfo_bucket[B_id] = obj['extension']
                                # If the object has a speed field, add it to the extension
                                if obj.get('speed'):
                                    self.uavInfo_bucket[B_id]['speed'] = obj['speed']
                                # Also store the height field if present
                                if obj.get('height'):
                                    self.uavInfo_bucket[B_id]['height'] = obj['height']
            # Write the assembled arrays into the sensor-data dict
temp['data_matrix'] = np.array(temp['data_matrix'])
sensor_data.append(temp)
return sensor_data
    def process_extension(self, target):
        # Default extension template with the expected keys
        extension = {
            "objectType": 30,
            "uavSN": "Un-known",
            "uavModel": "Un-known",
            "pilotLat": 0.0,
            "pilotLon": 0.0,
            "speedX": 0.0,
            "speedY": 0.0,
            "speedZ": 0.0,
            "time": 0.0,
            "born_time": 0.0
        }
        # Fetch historical values from target_bowl
        if target['objectId'] in self.target_bowl.keys():
            extension = self.target_bowl[target['objectId']]
        result_source = target['source']
        # Update with values from the associated sources
        for source in result_source:
            id = str(source[0]) + str(source[1])
            if self.uavInfo_bucket.get(id):
                for key, value in self.uavInfo_bucket[id].items():
                    # Only update when the new value is valid
                    if value not in ["Un-known", 0.0, None, "Unknown", "DJI Mavic"]:
                        extension[key] = value
        extension['born_time'] = int(target['born_time'])
        # Persist the state back to target_bowl
        self.target_bowl[target['objectId']] = extension
return extension
    def data_decoder(self, filtered_results):
        """
        Decode the filtered results into the output payload.
        """
        current_time = datetime.datetime.now()
        timestamp = int(current_time.timestamp() * 1000)
        combined_objects = []
        for target in filtered_results:
            X = target['X']
            Y = target['Y']
            Z = target['Z']  # Z here is actually the altitude
            lat, lon = convert_to_geodetic(X, Y, self.reference_point)
            extension = self.process_extension(target)
            extension['time'] = int(timestamp)
            extension['born_time'] = int(int(target['born_time']) / 1000)  # born_time arrives in milliseconds
            new_origin_source = []
            for source in target['source']:
                device_id, object_id = source
                # Get the device abbreviation from device_info_dict
                device_abbreviation = self.device_info_dict.get(device_id, {}).get('device_type', 'other')
                # Map the abbreviation to the numeric device type
                device_type = self.device_type_mapping.get(device_abbreviation, 999)
                new_origin_source.append(f"{device_type}_{device_id}_{object_id}")
            # Pick the speed according to the device priority order
highest_priority_speed = None
highest_priority = float('inf')
for source in target['source']:
device_id, object_id = source
B_id = str(device_id) + str(object_id)
if self.uavInfo_bucket.get(B_id):
device_type = self.device_info_dict.get(device_id, {}).get('device_type', 'other')
priority = self.device_type_speedrank.get(device_type, float('inf'))
if priority < highest_priority:
                        highest_priority = priority
                        # Fetch the speed and convert units
                        speed = self.uavInfo_bucket[B_id].get('speed', target['speed'])
                        if device_type == "5ga":  # 5ga devices report km/h
                            speed = speed / 3.6  # convert km/h to m/s
                        highest_priority_speed = speed
            # Ensure highest_priority_speed is a speed obtained from a device
            if highest_priority_speed is None:
                # If no current speed was found, look for one in the history
                for obj in reversed(combined_objects):
                    if obj["objectId"] == target['objectId']:
                        highest_priority_speed = obj.get("speed")
                        break
                if highest_priority_speed is None:
                    print(f"Warning: No speed found for target {target['objectId']}, using default target speed.")
                    new_speed = target['speed']
                else:
                    new_speed = highest_priority_speed
            else:
                new_speed = highest_priority_speed
            # Debug output: check where the speed came from
            print(f"Selected speed for target {target['objectId']}: {new_speed} from device with priority {highest_priority}")
            # Fetch the height field
height = None
for source in target['source']:
device_id, object_id = source
B_id = str(device_id) + str(object_id)
if self.uavInfo_bucket.get(B_id):
if self.uavInfo_bucket[B_id].get('height'):
height = self.uavInfo_bucket[B_id]['height']
break
            # If no current height was obtained, look in the history
            if height is None:
                for obj in reversed(combined_objects):
                    if obj["objectId"] == target['objectId']:
                        prev_height = obj.get("height")
                        if prev_height is not None:  # a valid historical height was found
                            height = prev_height
                            break
            # If height is still missing, keep the most recent historical height
            if height is None and combined_objects:
                for obj in reversed(combined_objects):
                    if obj["objectId"] == target['objectId']:
                        height = obj.get("height")
                        break
temp = {
# "msg_cnt":result['msg_cnt'],#增加msg_cnt用于检测有无丢包
"objectId": target['objectId'],
"X": X,
"Y": Y,
"height": height, # 使用当前height或历史height
"altitude": Z,
"speed": new_speed, # 使用优先级最高的速度
'latitude': lat,
'longitude': lon,
'sigma': target['sigma'],
"extension": {
"origin_source": new_origin_source, # 更新后的 origin_source
# 其他extension字段...
"objectType": extension.get('objectType', 0),
"uavSN": extension.get("uavSN", "Un-known"),
"uavModel": extension.get("uavModel", "Un-known"),
"pilotLat": extension.get("pilotLat", 0.0),
"pilotLon": extension.get("pilotLon", 0.0),
"speedX": 0.0, # 不再使用速度分量
"speedY": 0.0,
"speedZ": 0.0,
"time": int(timestamp),
"born_time": int(int(target['born_time']) / 1000),
},
"time": int(timestamp),
}
            # If objectType was already set to a non-zero value, do not overwrite it.
if extension.get('objectType', 0) != 0 or target['objectId'] not in [obj['objectId'] for obj in
combined_objects]:
temp["extension"]["objectType"] = extension.get('objectType', 0)
else:
                # Look up the objectType of the same objectId in combined_objects, defaulting to 0
existing_object_types = [obj["extension"].get('objectType', 0) for obj in combined_objects if
obj["objectId"] == target['objectId']]
if existing_object_types and existing_object_types[0] != 0:
temp["extension"]["objectType"] = existing_object_types[0]
else:
temp["extension"]["objectType"] = 0
            # Check and update uavSN and uavModel
            invalid_values = ["Un-known", 0.0, None, "Unknown", "DJI Mavic"]
            # uavSN must contain both letters and digits, guarding against odd values introduced elsewhere
current_sn = extension.get('uavSN', "Un-known")
if isinstance(current_sn, str):
has_letter = any(c.isalpha() for c in current_sn)
has_digit = any(c.isdigit() for c in current_sn)
if not (has_letter and has_digit):
                    # First look for a valid historical SN with the same objectId
for obj in reversed(combined_objects):
if obj["objectId"] == target['objectId']:
prev_sn = obj["extension"].get("uavSN", "Un-known")
if isinstance(prev_sn, str):
has_letter = any(c.isalpha() for c in prev_sn)
has_digit = any(c.isdigit() for c in prev_sn)
if has_letter and has_digit:
current_sn = prev_sn
break
temp["extension"]["uavSN"] = current_sn
temp["extension"]["uavModel"] = extension.get('uavModel', "Un-known")
combined_objects.append(temp)
data_processed = {
"deviceType": 1000,
"providerCode": "DPZYLY",
"deviceId": self.task_id,
"objects": combined_objects,
"ptTime": int(timestamp)
}
        # Keep only meaningful data
        if data_processed and data_processed.get("objects") and len(data_processed["objects"]) > 0:
            formatted_time = current_time.strftime('%Y-%m-%d %H:%M:%S')
            with open('PB_log.txt', 'a') as log_file:  # open the log file in append mode
                log_file.write('=====================\n')  # separator
                log_file.write(f"time: {formatted_time}\n")  # timestamp
                log_file.write(f"data: {data_processed}\n")
return data_processed
    def extract_parms(self, parm_data):
        """
        Extract task parameters from the dispatched task payload.
        """
        id_list = []  # device IDs
        priority_dict = {}  # device priorities
        device_info_dict = {}  # new: device details, kept for later lookups
data_dict = json.loads(parm_data)
print(data_dict)
self.task_id = data_dict['task_id']
new_topics = [("fromcheck/DPZYLY/fly_data/rtk/#", 0)]
devices = data_dict['devices']
for device in devices:
device_id = device['device_id']
if device_id:
id_list.append(device_id)
new_topics.append((device["device_topic"], 0))
                # Store the device priority (default 1)
                if device.get('priority'):
                    priority_dict[device_id] = device['priority']
                else:
                    priority_dict[device_id] = 1
                # Store device details (topic, type, sampling_rate) for later one-to-many lookups
                device_info_dict[device_id] = {
                    'device_topic': device['device_topic'],
                    'device_type': device['device_type'],
                    'sampling_rate': device['properties'].get('sampling_rate', 1)  # defaults to 1 if not provided
}
        self.priority_dict = priority_dict
        self.device_info_dict = device_info_dict  # keep the device-info dict on the instance
        self.sensor_id_list = id_list
        # Handle the reference point
if data_dict.get('reference_point'):
try:
original_reference_point = data_dict['reference_point']
                if len(original_reference_point) == 2:  # must be a tuple or list with two elements
self.reference_point = (
float(original_reference_point[0]) + 0,
float(original_reference_point[1]) + 0
)
else:
raise ValueError("Invalid reference_point structure. Must be a tuple or list with two elements.")
except Exception as e:
print(f"Error processing reference_point: {e}")
                self.reference_point = None  # or fall back to a default value
return new_topics
def extract_fusion_parms(self,parm_data):
data_dict = json.loads(parm_data)
        # fusion_dict holds the keys to extract from data_dict
fusion_dict = {
"fusion_type": 1,
"gate": 1,
"interval": 1,
"show_thres": 0.4
}
        # Update fusion_dict with any matching keys present in data_dict
if "fusion_type" in data_dict:
fusion_dict["fusion_type"] = data_dict["fusion_type"]
if "gate" in data_dict:
fusion_dict["gate"] = data_dict["gate"]
if "interval" in data_dict:
fusion_dict["interval"] = data_dict["interval"]
if "show_thres" in data_dict:
fusion_dict["show_thres"] = data_dict["show_thres"]
        # Return the updated fusion_dict
return fusion_dict
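
# Illustrative task payload consumed by extract_parms / extract_fusion_parms
# (field names are taken from the code above; concrete values are assumptions):
#   {
#     "task_id": "10087",
#     "reference_point": [30.51, 104.08],
#     "gate": 40, "interval": 0.6, "fusion_type": 1, "show_thres": 0.4,
#     "devices": [
#       {"device_id": "10580005",
#        "device_topic": "bridge/DP74b4ef9fb4aaf269/device_data/5ga/10580005",
#        "device_type": "5ga", "priority": 1,
#        "properties": {"sampling_rate": 1}}
#     ]
#   }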

View File

@@ -0,0 +1,71 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fe-configmap
namespace: doriscluster
labels:
app.kubernetes.io/component: fe
data:
fe.conf: |
CUR_DATE=`date +%Y%m%d-%H%M%S`
# the output dir of stderr and stdout
LOG_DIR = ${DORIS_HOME}/log
JAVA_OPTS="-Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:+UseMembar -XX:SurvivorRatio=8 -XX:MaxTenuringThreshold=7 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSClassUnloadingEnabled -XX:-CMSParallelRemarkEnabled -XX:CMSInitiatingOccupancyFraction=80 -XX:SoftRefLRUPolicyMSPerMB=0 -Xloggc:$DORIS_HOME/log/fe.gc.log.$CUR_DATE"
# For jdk 9+, this JAVA_OPTS will be used as default JVM options
JAVA_OPTS_FOR_JDK_9="-Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:SurvivorRatio=8 -XX:MaxTenuringThreshold=7 -XX:+CMSClassUnloadingEnabled -XX:-CMSParallelRemarkEnabled -XX:CMSInitiatingOccupancyFraction=80 -XX:SoftRefLRUPolicyMSPerMB=0 -Xlog:gc*:$DORIS_HOME/log/fe.gc.log.$CUR_DATE:time"
# INFO, WARN, ERROR, FATAL
sys_log_level = INFO
# NORMAL, BRIEF, ASYNC
sys_log_mode = NORMAL
# Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
# jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
http_port = 8030
arrow_flight_sql_port = 9090
rpc_port = 9020
query_port = 9030
edit_log_port = 9010
enable_fqdn_mode = true
---
apiVersion: v1
kind: ConfigMap
metadata:
name: be-configmap
namespace: doriscluster
labels:
app.kubernetes.io/component: be
data:
be.conf: |
CUR_DATE=`date +%Y%m%d-%H%M%S`
PPROF_TMPDIR="$DORIS_HOME/log/"
JAVA_OPTS="-Xmx1024m -DlogPath=$DORIS_HOME/log/jni.log -Xloggc:$DORIS_HOME/log/be.gc.log.$CUR_DATE -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.java.command=DorisBE -XX:-CriticalJNINatives -DJDBC_MIN_POOL=1 -DJDBC_MAX_POOL=100 -DJDBC_MAX_IDLE_TIME=300000 -DJDBC_MAX_WAIT_TIME=5000"
# For jdk 9+, this JAVA_OPTS will be used as default JVM options
JAVA_OPTS_FOR_JDK_9="-Xmx1024m -DlogPath=$DORIS_HOME/log/jni.log -Xlog:gc:$DORIS_HOME/log/be.gc.log.$CUR_DATE -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.java.command=DorisBE -XX:-CriticalJNINatives -DJDBC_MIN_POOL=1 -DJDBC_MAX_POOL=100 -DJDBC_MAX_IDLE_TIME=300000 -DJDBC_MAX_WAIT_TIME=5000"
    # since 1.2, JAVA_HOME needs to be set to run the BE process.
# JAVA_HOME=/path/to/jdk/
# https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
# https://jemalloc.net/jemalloc.3.html
JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:15000,dirty_decay_ms:15000,oversize_threshold:0,lg_tcache_max:20,prof:false,lg_prof_interval:32,lg_prof_sample:19,prof_gdump:false,prof_accum:false,prof_leak:false,prof_final:false"
JEMALLOC_PROF_PRFIX=""
# INFO, WARNING, ERROR, FATAL
sys_log_level = INFO
# ports for admin, web, heartbeat service
be_port = 9060
webserver_port = 8040
heartbeat_service_port = 9050
arrow_flight_sql_port = 39091
brpc_port = 8060

View File

@@ -0,0 +1,94 @@
apiVersion: doris.selectdb.com/v1
kind: DorisCluster
metadata:
labels:
app.kubernetes.io/name: doriscluster
name: doriscluster-helm
namespace: doriscluster
spec:
feSpec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- doris
replicas: 3
image: 172.16.100.55:8033/cmii/doris.fe-ubuntu:2.1.6
limits:
cpu: 8
memory: 16Gi
requests:
cpu: 2
memory: 6Gi
configMapInfo:
# use kubectl create configmap fe-configmap --from-file=fe.conf
configMapName: fe-configmap
resolveKey: fe.conf
persistentVolumes:
- mountPath: /opt/apache-doris/fe/doris-meta
name: doris-fe-2000g
persistentVolumeClaimSpec:
        # when using a specific storageclass, set storageClassName accordingly (example below).
storageClassName: doris-nfs-sc-56
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2000Gi
- mountPath: /opt/apache-doris/fe/jdbc_drivers
name: doriscluster-storage-fe-jdbc-drivers
persistentVolumeClaimSpec:
        # when using a specific storageclass, set storageClassName accordingly (example below).
storageClassName: doris-nfs-sc-58
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
beSpec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- doris
replicas: 3
image: 172.16.100.55:8033/cmii/doris.be-ubuntu:2.1.6
limits:
cpu: 8
memory: 24Gi
requests:
cpu: 2
memory: 6Gi
configMapInfo:
# use kubectl create configmap be-configmap --from-file=be.conf
configMapName: be-configmap
resolveKey: be.conf
persistentVolumes:
- mountPath: /opt/apache-doris/be/storage
name: doris-1-9000g-pvc
persistentVolumeClaimSpec:
        # when using a specific storageclass, set storageClassName accordingly (example below).
storageClassName: doris-nfs-sc-57
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 9000Gi
- mountPath: /opt/apache-doris/be/jdbc_drivers
name: doriscluster-storage-be-jdbc-drivers
persistentVolumeClaimSpec:
        # when using a specific storageclass, set storageClassName accordingly (example below).
storageClassName: doris-nfs-sc-58
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1000Gi

View File

@@ -0,0 +1,119 @@
apiVersion: doris.selectdb.com/v1
kind: DorisCluster
metadata:
labels:
app.kubernetes.io/name: doriscluster
name: doriscluster-helm
namespace: doriscluster
spec:
feSpec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: doris-deploy
operator: In
values:
- "true"
replicas: 1
image: 172.16.100.55:8033/cmii/doris.fe-ubuntu:2.1.6
limits:
cpu: 8
memory: 16Gi
requests:
cpu: 2
memory: 6Gi
configMapInfo:
# use kubectl create configmap fe-configmap --from-file=fe.conf
configMapName: fe-configmap
resolveKey: fe.conf
nodeSelector:
uavcloud.env: demo
persistentVolumes:
- mountPath: /opt/apache-doris/fe/doris-meta
name: doriscluster-storage0
persistentVolumeClaimSpec:
        # when using a specific storageclass, set storageClassName accordingly (example below).
storageClassName: doris-static-storage
accessModes:
- ReadWriteOnce
resources:
          # notice: if the storage size is less than 5G, fe will not start normally.
requests:
storage: 500Gi
- mountPath: /opt/apache-doris/fe/jdbc_drivers
name: doriscluster-storage-fe-jdbc-drivers
persistentVolumeClaimSpec:
        # when using a specific storageclass, set storageClassName accordingly (example below).
storageClassName: cmlc-nfs-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
beSpec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: doris-deploy
operator: In
values:
- "true"
replicas: 3
image: 172.16.100.55:8033/cmii/doris.be-ubuntu:2.1.6
limits:
cpu: 8
memory: 24Gi
requests:
cpu: 2
memory: 6Gi
configMapInfo:
# use kubectl create configmap be-configmap --from-file=be.conf
configMapName: be-configmap
resolveKey: be.conf
nodeSelector:
uavcloud.env: demo
persistentVolumes:
- mountPath: /opt/apache-doris/be/storage
name: doriscluster-storage1
persistentVolumeClaimSpec:
        # when using a specific storageclass, set storageClassName accordingly (example below).
storageClassName: doris-static-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 9000Gi
    - mountPath: /opt/apache-doris/be/storage2    # mount paths must be unique within a pod; this second data dir is an assumed value
name: doriscluster-storage2
persistentVolumeClaimSpec:
        # when using a specific storageclass, set storageClassName accordingly (example below).
storageClassName: doris-static-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 9000Gi
- mountPath: /opt/apache-doris/be/log
name: doriscluster-storage3
persistentVolumeClaimSpec:
        # when using a specific storageclass, set storageClassName accordingly (example below).
storageClassName: doris-static-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 9000Gi
- mountPath: /opt/apache-doris/be/jdbc_drivers
name: doriscluster-storage-be-jdbc-drivers
persistentVolumeClaimSpec:
        # when using a specific storageclass, set storageClassName accordingly (example below).
storageClassName: cmlc-nfs-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,340 @@
# Source: doris-operator/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: serviceaccount
app.kubernetes.io/instance: controller-doris-operator-sa
app.kubernetes.io/component: rbac
app.kubernetes.io/created-by: doris-operator
app.kubernetes.io/part-of: doris-operator
app.kubernetes.io/managed-by: Helm
name: doris-operator
namespace: doriscluster
---
# Source: doris-operator/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: doris-operator
rules:
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- statefulsets/status
verbs:
- get
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- doris.selectdb.com
resources:
- dorisclusters
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- doris.selectdb.com
resources:
- dorisclusters/finalizers
verbs:
- update
- apiGroups:
- doris.selectdb.com
resources:
- dorisclusters/status
verbs:
- get
- patch
- update
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
---
# Source: doris-operator/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: clusterrolebinding
app.kubernetes.io/instance: doris-operator-rolebinding
app.kubernetes.io/component: rbac
app.kubernetes.io/created-by: doris-operator
app.kubernetes.io/part-of: doris-operator
app.kubernetes.io/managed-by: Helm
name: doris-operator-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: doris-operator
subjects:
- kind: ServiceAccount
name: doris-operator
namespace: doriscluster
---
# Source: doris-operator/templates/leader-election-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/name: role
app.kubernetes.io/instance: leader-election-role
app.kubernetes.io/component: rbac
app.kubernetes.io/created-by: doris-operator
app.kubernetes.io/part-of: doris-operator
app.kubernetes.io/managed-by: Helm
name: leader-election-role
namespace: doriscluster
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
# Source: doris-operator/templates/leader-election-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/name: rolebinding
app.kubernetes.io/instance: leader-election-rolebinding
app.kubernetes.io/component: rbac
app.kubernetes.io/created-by: doris-operator
app.kubernetes.io/part-of: doris-operator
app.kubernetes.io/managed-by: Helm
name: leader-election-rolebinding
namespace: doriscluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: leader-election-role
subjects:
- kind: ServiceAccount
name: doris-operator
namespace: doriscluster
---
# Source: doris-operator/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: doris-operator
namespace: doriscluster
labels:
control-plane: doris-operator
app.kubernetes.io/name: deployment
app.kubernetes.io/instance: doris-operator
app.kubernetes.io/component: doris-operator
app.kubernetes.io/created-by: doris-operator
app.kubernetes.io/part-of: doris-operator
spec:
selector:
matchLabels:
control-plane: doris-operator
replicas: 1
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: doris-operator
labels:
control-plane: doris-operator
spec:
# TODO(user): Uncomment the following code to configure the nodeAffinity expression
# according to the platforms which are supported by your solution.
# It is considered best practice to support multiple architectures. You can
# build your manager image using the makefile target docker-buildx.
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: doris-deploy
operator: In
values:
- "true"
securityContext:
runAsNonRoot: true
# TODO(user): For common cases that do not require escalating privileges
# it is recommended to ensure that all your Pods/Containers are restrictive.
# More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
# Please uncomment the following code if your project does NOT have to work on old Kubernetes
# versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ).
# seccompProfile:
# type: RuntimeDefault
containers:
- command:
- /dorisoperator
args:
- --leader-elect
image: 172.16.100.55:8033/cmii/doris.k8s-operator:1.3.1
name: dorisoperator
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- "ALL"
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
# TODO(user): Configure the resources accordingly based on the project requirements.
# More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources:
requests:
cpu: 2
memory: 4Gi
limits:
cpu: 2
memory: 4Gi
serviceAccountName: doris-operator
terminationGracePeriodSeconds: 10

View File

@@ -0,0 +1,68 @@
package assert
import (
"fmt"
"reflect"
"strings"
)
var Asserter = NewAssert()
// Assert utility class
type Assert struct{}
// NewAssert returns a new instance of Assert
func NewAssert() *Assert {
return &Assert{}
}
// NotEmpty checks if the given value is not empty
func (a *Assert) NotEmpty(value interface{}, message string) {
if isEmptyValue(reflect.ValueOf(value)) {
panic(fmt.Sprintf("Assertion failed: %s", message))
}
}
// NotBlank checks if the given string is not blank
func (a *Assert) NotBlank(str string, message string) {
if str == "" || len(strings.TrimSpace(str)) == 0 {
panic(fmt.Sprintf("Assertion failed: %s", message))
}
}
// Equals checks if two values are equal
func (a *Assert) Equals(expected, actual interface{}, message string) {
if !reflect.DeepEqual(expected, actual) {
panic(fmt.Sprintf("Assertion failed: %s. Expected '%v' but got '%v'", message, expected, actual))
}
}
// Nil checks if the given value is nil
func (a *Assert) Nil(value interface{}, message string) {
if value != nil {
panic(fmt.Sprintf("Assertion failed: %s", message))
}
}
// NotNil checks if the given value is not nil
func (a *Assert) NotNil(value interface{}, message string) {
if value == nil {
panic(fmt.Sprintf("Assertion failed: %s", message))
}
}
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Map:
return v.IsNil() || v.Len() == 0
case reflect.Ptr:
if v.IsNil() {
return true
}
return isEmptyValue(v.Elem())
default:
return false
}
}
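A minimal usage sketch for the helpers above (hypothetical caller; the import path wdd.io/agent-common/assert is assumed from the agent-common module in this commit):

```go
package main

import (
	"fmt"

	"wdd.io/agent-common/assert" // assumed import path under the agent-common module
)

func main() {
	// NotBlank panics on failure, so the caller recovers to turn the
	// assertion failure into an ordinary printed message.
	defer func() {
		if r := recover(); r != nil {
			fmt.Println(r) // "Assertion failed: config path must not be blank"
		}
	}()
	assert.Asserter.NotBlank("   ", "config path must not be blank")
}
```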

View File

@@ -0,0 +1,90 @@
// Copyright 2013-2022 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package assert provides helper functions for testing.
package assert
import (
"fmt"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strings"
"testing"
)
// skip defines the default call depth
const skip = 2
// Equal asserts that got and want are equal as defined by
// reflect.DeepEqual. The test fails with msg if they are not equal.
func Equal(t *testing.T, got, want interface{}, msg ...string) {
if x := equal(2, got, want, msg...); x != "" {
fmt.Println(x)
t.Fail()
}
}
func equal(skip int, got, want interface{}, msg ...string) string {
if !reflect.DeepEqual(got, want) {
return fail(skip, "got %v want %v %s", got, want, strings.Join(msg, " "))
}
return ""
}
// Panic asserts that function fn() panics.
// It assumes that recover() either returns a string or
// an error and fails if the message does not match
// the regular expression in 'matches'.
func Panic(t *testing.T, fn func(), matches string) {
if x := doesPanic(2, fn, matches); x != "" {
fmt.Println(x)
t.Fail()
}
}
func doesPanic(skip int, fn func(), expr string) (err string) {
defer func() {
r := recover()
if r == nil {
err = fail(skip, "did not panic")
return
}
var v string
switch t := r.(type) {
case error:
v = t.Error()
case string:
v = t
}
err = matches(skip, v, expr)
}()
fn()
return ""
}
// Matches asserts that a value matches a given regular expression.
func Matches(t *testing.T, value, expr string) {
if x := matches(2, value, expr); x != "" {
fmt.Println(x)
t.Fail()
}
}
func matches(skip int, value, expr string) string {
ok, err := regexp.MatchString(expr, value)
if err != nil {
return fail(skip, "invalid pattern %q. %s", expr, err)
}
if !ok {
return fail(skip, "got %s which does not match %s", value, expr)
}
return ""
}
func fail(skip int, format string, args ...interface{}) string {
_, file, line, _ := runtime.Caller(skip)
return fmt.Sprintf("\t%s:%d: %s\n", filepath.Base(file), line, fmt.Sprintf(format, args...))
}

View File

@@ -0,0 +1,55 @@
// Copyright 2013-2022 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package assert
import "testing"
func TestEqualEquals(t *testing.T) {
if got, want := equal(2, "a", "a"), ""; got != want {
t.Fatalf("got %q want %q", got, want)
}
}
func TestEqualFails(t *testing.T) {
if got, want := equal(2, "a", "b"), "\tassert_test.go:16: got a want b \n"; got != want {
t.Fatalf("got %q want %q", got, want)
}
}
func TestPanicPanics(t *testing.T) {
if got, want := doesPanic(2, func() { panic("foo") }, ""), ""; got != want {
t.Fatalf("got %q want %q", got, want)
}
}
func TestPanicPanicsAndMatches(t *testing.T) {
if got, want := doesPanic(2, func() { panic("foo") }, "foo"), ""; got != want {
t.Fatalf("got %q want %q", got, want)
}
}
func TestPanicPanicsAndDoesNotMatch(t *testing.T) {
if got, want := doesPanic(2, func() { panic("foo") }, "bar"), "\tassert.go:62: got foo which does not match bar\n"; got != want {
t.Fatalf("got %q want %q", got, want)
}
}
func TestPanicPanicsAndDoesNotPanic(t *testing.T) {
if got, want := doesPanic(2, func() {}, "bar"), "\tassert.go:65: did not panic\n"; got != want {
t.Fatalf("got %q want %q", got, want)
}
}
func TestMatchesMatches(t *testing.T) {
if got, want := matches(2, "aaa", "a"), ""; got != want {
t.Fatalf("got %q want %q", got, want)
}
}
func TestMatchesDoesNotMatch(t *testing.T) {
if got, want := matches(2, "aaa", "b"), "\tassert_test.go:52: got aaa which does not match b\n"; got != want {
t.Fatalf("got %q want %q", got, want)
}
}

10
agent-common/go.mod Normal file
View File

@@ -0,0 +1,10 @@
module wdd.io/agent-common
go 1.22.1
require (
go.uber.org/zap v1.27.0
golang.org/x/net v0.24.0
)
require go.uber.org/multierr v1.10.0 // indirect

16
agent-common/go.sum Normal file
View File

@@ -0,0 +1,16 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -0,0 +1,272 @@
package image
import (
"os"
"path/filepath"
"strings"
"wdd.io/agent-common/logger"
)
var log = logger.Log
var unSupported = "UN-SUPPORT-APP-NAME"
const CmiiHarborPrefix = "harbor.cdcyy.com.cn/cmii/"
func ImageFullNameToAppName(imageFullName string) (appName string) {
// harbor.cdcyy.cn/cmii/cmii-uav-platform:5.4.0 ==> cmii-uav-platform
// 10.1.1.1:8033/cmii/ok:1.2  not supported, must not exist
// rancher/fleet:v0.3.4
// ossr/srs:v5.0.1
// nginx:latest
// bitnami/minio:2022.5.4
// simonrupf/chronyd:0.4.3
s := strings.Split(imageFullName, ":")
if len(s) == 1 {
// nginx
return imageFullName
}
// 10.1.1.1:8033/cmii/ok:1.2
// harbor.cdcyy.cn/cmii/cmii-uav-platform:5.4.0 ==> cmii-uav-platform
// rancher/fleet:v0.3.4
// ossr/srs:v5.0.1
// nginx:latest
// bitnami/minio:2022.5.4
// simonrupf/chronyd:0.4.3
middle := s[len(s)-2]
split := strings.Split(middle, "/")
return split[len(split)-1]
}
func ImageFullNameToImageTag(imageFullName string) (imageTag string) {
s := strings.Split(imageFullName, ":")
if len(s) > 2 {
// 10.1.1.1:8033/cmii/ok:1.2 -- the first colon belongs to the registry port, so the tag is the last segment
return s[len(s)-1]
}
return s[1]
}
func ImageFullNameToImageNameAndTag(imageFullName string) (imageName, imageTag string) {
s := strings.Split(imageFullName, ":")
if len(s) == 1 {
// nginx
return imageFullName, "latest"
}
// 10.1.1.1:8033/cmii/ok:1.2
// harbor.cdcyy.cn/cmii/cmii-uav-platform:5.4.0 ==> cmii-uav-platform
// rancher/fleet:v0.3.4
// ossr/srs:v5.0.1
// nginx:latest
// bitnami/minio:2022.5.4
// simonrupf/chronyd:0.4.3
middle := s[len(s)-2]
split := strings.Split(middle, "/")
return split[len(split)-1], s[len(s)-1]
}
// ImageFullNameToGzipFileName must emit a name with exactly four '='-separated segments, i.e. '=' appears three times
func ImageFullNameToGzipFileName(imageFullName string) (gzipFileName string) {
// rancher/fleet:v0.3.4
// ossr/srs:v5.0.1 ==> docker=cmii=srs=v5.0.1.tar.gz
// nginx:latest
// bitnami/minio:2022.5.4
// simonrupf/chronyd:0.4.3
s := strings.Split(imageFullName, ":")
if len(s) == 1 {
// nginx
return "docker=cmii=" + imageFullName + "=latest.tar.gz"
} else if len(s) > 2 {
// 10.1.1.1:8033/cmii/ok:1.2  not supported, must not exist
return "unsupport=" + imageFullName
}
middle := s[len(s)-2]
split := strings.Split(middle, "/")
if len(split) == 1 {
// nginx:latest
return "docker=cmii=" + s[0] + "=" + s[1] + ".tar.gz"
}
if strings.HasPrefix(imageFullName, "harbor.cdcyy.com.cn") {
// harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.4.0 ==> cmlc=cmii=cmii-uav-platform=5.4.0.tar.gz
// harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.4.0
// harbor.cdcyy.com.cn/cmii/cmlc-ai/cmlc-ai-operator:v5.2.0-t4-no-dino
return "cmlc=cmii=" + split[len(split)-1] + "=" + s[1] + ".tar.gz"
} else if strings.Contains(imageFullName, "rancher") {
return "docker=rancher=" + split[len(split)-1] + "=" + s[1] + ".tar.gz"
} else {
// ossrs/srs:v4.0.136
return "docker=cmii=" + split[len(split)-1] + "=" + s[1] + ".tar.gz"
}
return "unsupport=" + imageFullName
}
// ImageNameToTargetImageFullName converts an image name (short or full form both work) to its full name on the target harbor host
func ImageNameToTargetImageFullName(imageName, targetHarborHost string) string {
targetProject := "cmii"
if strings.HasPrefix(imageName, "rancher") {
// rancher/rancher:v2.5.7  a rancher-style name
targetProject = "rancher"
}
countOfSplit := strings.Count(imageName, "/")
split := strings.Split(imageName, "/")
targetImageName := imageName
switch countOfSplit {
case 0:
// nginx:latest
targetImageName = targetHarborHost + "/" + targetProject + "/" + imageName
default:
// 10.250.0.10:8033/cmii/cmii-uav-gateway:v1.0.0
targetImageName = targetHarborHost + "/" + targetProject + "/" + split[countOfSplit]
}
return targetImageName
}
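// Worked example, derived from the mapping above (hypothetical hosts):
//   ImageNameToTargetImageFullName("10.250.0.10:8033/cmii/cmii-uav-gateway:v1.0.0", "10.250.0.100:8033")
//     ==> "10.250.0.100:8033/cmii/cmii-uav-gateway:v1.0.0"
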
func GzipFileNameToImageFullName(gzipFileName string) (imageFullName string) {
if !strings.HasSuffix(gzipFileName, ".tar.gz") {
log.ErrorF(" %s does not end with .tar.gz", gzipFileName)
return ""
}
gzipFileName = strings.TrimSuffix(gzipFileName, ".tar.gz")
parts := strings.Split(gzipFileName, "=")
if strings.HasPrefix(gzipFileName, "docker=library") {
// docker=library=busybox=latest.tar.gz
return parts[2] + ":" + parts[3]
}
if strings.HasPrefix(gzipFileName, "docker") {
return parts[1] + "/" + parts[2] + ":" + parts[3]
}
if strings.HasPrefix(gzipFileName, "cmlc=cmii=") {
return parts[2] + ":" + parts[3]
}
return gzipFileName
}
func GzipFileNameToImageNameAndTag(gzipFileName string) (imageName, imageTag string) {
if !strings.HasSuffix(gzipFileName, ".tar.gz") {
log.ErrorF(" %s does not end with .tar.gz", gzipFileName)
return "", ""
}
gzipFileName = strings.TrimSuffix(gzipFileName, ".tar.gz")
parts := strings.Split(gzipFileName, "=")
if strings.HasPrefix(gzipFileName, "docker=library") {
// docker=library=busybox=latest.tar.gz
return parts[2], parts[3]
}
if strings.HasPrefix(gzipFileName, "docker") {
// docker=kubernetes=kubernetes-dashboard=v2.4.0.tar.gz
return parts[1] + "/" + parts[2], parts[3]
}
if strings.HasPrefix(gzipFileName, "cmlc=cmii=") {
return parts[2], parts[3]
}
return "", ""
}
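// Worked examples for the two decoders above, derived from the naming rules:
//   GzipFileNameToImageFullName("cmlc=cmii=cmii-uav-platform=5.4.0.tar.gz")
//     ==> "cmii-uav-platform:5.4.0"
//   GzipFileNameToImageNameAndTag("docker=rancher=fleet=v0.3.4.tar.gz")
//     ==> ("rancher/fleet", "v0.3.4")
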
func GzipFolderPathToCmiiImageTagMaps(gzipFolderPath string) (frontendImageVersionMap, backendImageVersionMap, srsImageVersionMap map[string]string) {
frontendImageVersionMap = make(map[string]string)
backendImageVersionMap = make(map[string]string)
srsImageVersionMap = make(map[string]string)
filepath.WalkDir(gzipFolderPath, func(path string, d os.DirEntry, err error) error {
//fmt.Println(path)
if d == nil {
return nil
}
name := d.Name()
if strings.HasSuffix(name, ".tar.gz") {
imageName, imageTag := GzipFileNameToImageNameAndTag(name)
if strings.Contains(imageName, "platform") {
frontendImageVersionMap[imageName] = imageTag
} else if strings.Contains(imageName, "srs") {
srsImageVersionMap[imageName] = imageTag
} else if strings.Contains(imageName, "operator") {
srsImageVersionMap[imageName] = imageTag
} else {
backendImageVersionMap[imageName] = imageTag
}
}
return nil
})
return frontendImageVersionMap, backendImageVersionMap, srsImageVersionMap
}
func CmiiImageMapFromImageFullNameList(cmiiImageFullNameList []string) (cmiiImageVersionMap map[string]string) {
cmiiImageVersionMap = make(map[string]string)
for _, imageFullName := range cmiiImageFullNameList {
imageName, imageTag := ImageFullNameToImageNameAndTag(imageFullName)
cmiiImageVersionMap[imageName] = imageTag
}
return cmiiImageVersionMap
}
func FrontendBackendSrsImageMapFromCmiiImageMap(cmiiImageVersionMap map[string]string) (frontendImageVersionMap, backendImageVersionMap, srsImageVersionMap map[string]string) {
frontendImageVersionMap = make(map[string]string)
backendImageVersionMap = make(map[string]string)
srsImageVersionMap = make(map[string]string)
for imageName, imageTag := range cmiiImageVersionMap {
if strings.Contains(imageName, "platform") {
frontendImageVersionMap[imageName] = imageTag
} else if strings.Contains(imageName, "srs") {
srsImageVersionMap[imageName] = imageTag
} else if strings.Contains(imageName, "operator") {
srsImageVersionMap[imageName] = imageTag
} else {
backendImageVersionMap[imageName] = imageTag
}
}
return frontendImageVersionMap, backendImageVersionMap, srsImageVersionMap
}

View File

@@ -0,0 +1,284 @@
package image
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"wdd.io/agent-common/real_project/zhejianyidong_erjipingtai"
"wdd.io/agent-common/real_project/zjjt"
"wdd.io/agent-common/utils"
)
var imageFullNameList = []string{
"bitnami/redis:6.2.6-debian-10-r0",
"simonrupf/chronyd:0.4.3",
"harbor.cdcyy.com.cn/cmii/cmii-rtsp-operator:v4.1.0",
"harbor.cdcyy.com.cn/cmii/ossrs/srs:v4.0.136",
"ossrs/srs:v4.0.136",
"mongo:5.0",
"bitnami/minio:2023.5.4",
"busybox:latest",
"busybox",
"rancher/rancher:v2.7.0",
"10.1.1.1:8033/cmii/ok:1.2",
"docker.107421.xyz/rancher/shell:v0.1.6",
}
func TestImageFullNameToGzipFileName(t *testing.T) {
for _, s := range imageFullNameList {
gzipFileName := ImageFullNameToGzipFileName(s)
fmt.Println(gzipFileName)
}
}
func TestGzipFileNameToImageFullName(t *testing.T) {
test := []string{
"cmlc=cmii=cmii-admin-data=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-mqtthandler=5.5.0.tar.gz",
"cmlc=cmii=cmii-admin-gateway=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-multilink=5.5.0.tar.gz",
"cmlc=cmii=cmii-admin-user=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-notice=5.5.0.tar.gz",
"cmlc=cmii=cmii-app-release=4.2.0-validation.tar.gz",
"cmlc=cmii=cmii-uav-oauth=5.5.0.tar.gz",
"cmlc=cmii=cmii-live-operator=5.2.0.tar.gz",
"cmlc=cmii=cmii-uav-platform=5.5.0.tar.gz",
"cmlc=cmii=cmii-open-gateway=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-platform-ai-brain=5.5.0.tar.gz",
"cmlc=cmii=cmii-srs-oss-adaptor=2023-SA.tar.gz",
"cmlc=cmii=cmii-uav-platform-armypeople=5.5.0.tar.gz",
"cmlc=cmii=cmii-suav-platform-supervision=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-platform-base=5.4.0.tar.gz",
"cmlc=cmii=cmii-suav-platform-supervisionh5=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-platform-cms-portal=5.5.0.tar.gz",
"cmlc=cmii=cmii-suav-supervision=5.4.0-032501.tar.gz",
"cmlc=cmii=cmii-uav-platform-detection=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-airspace=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-platform-jiangsuwenlv=4.1.3-jiangsu-0427.tar.gz",
"cmlc=cmii=cmii-uav-alarm=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-platform-logistics=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-autowaypoint=4.2.0-beta.tar.gz",
"cmlc=cmii=cmii-uav-platform-media=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-brain=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-platform-multiterminal=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-cloud-live=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-platform-mws=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-cms=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-platform-oms=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-data-post-process=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-platform-open=5.5.0-0419.tar.gz",
"cmlc=cmii=cmii-uav-developer=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-platform-qinghaitourism=4.1.0-21377-0508.tar.gz",
"cmlc=cmii=cmii-uav-device=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-platform-security=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-emergency=5.3.0.tar.gz",
"cmlc=cmii=cmii-uav-platform-securityh5=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-gateway=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-platform-share=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-gis-server=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-platform-splice=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-grid-datasource=5.2.0-24810.tar.gz",
"cmlc=cmii=cmii-uav-platform-threedsimulation=5.2.0-21392.tar.gz",
"cmlc=cmii=cmii-uav-grid-engine=5.1.0.tar.gz",
"cmlc=cmii=cmii-uav-process=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-grid-manage=5.1.0.tar.gz",
"cmlc=cmii=cmii-uav-surveillance=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-industrial-portfolio=5.5.0-041801.tar.gz",
"cmlc=cmii=cmii-uav-threedsimulation=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-integration=5.5.0-0419.tar.gz",
"cmlc=cmii=cmii-uav-tower=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-kpi-monitor=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-user=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-logger=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-waypoint=5.5.0.tar.gz",
"cmlc=cmii=cmii-uav-material-warehouse=5.5.0.tar.gz",
"cmlc=cmii=srs=v5.0.195.tar.gz",
"cmlc=cmii=cmii-uav-mission=5.5.0.tar.gz",
}
test2 := []string{
"docker=rancher=backup-restore-operator=v1.0.3.tar.gz",
"docker=rancher=kube-api-auth = v0.1.4.tar.gz",
"docker=rancher=calico-cni=v3.17.2.tar.gz",
"docker=rancher=kubernetes-external-dns=v0.7.3.tar.gz",
"docker=rancher=calico-ctl=v3.17.2.tar.gz",
"docker=rancher=library-busybox=1.31.1.tar.gz",
"docker=rancher=calico-kube-controllers=v3.17.2.tar.gz",
"docker=rancher=library-busybox=1.32.1.tar.gz",
"docker=rancher=calico-node=v3.17.2.tar.gz",
"docker=rancher=library-nginx=1.19.2-alpine.tar.gz",
"docker=rancher=calico-pod2daemon-flexvol=v3.17.2.tar.gz",
"docker=rancher=library-traefik=1.7.19.tar.gz",
"docker=rancher=cis-operator=v1.0.3.tar.gz",
"docker=rancher=local-path-provisioner=v0.0.11.tar.gz",
"docker=rancher=cluster-proportional-autoscaler=1.7.1.tar.gz",
"docker=rancher=local-path-provisioner=v0.0.14.tar.gz",
"docker=rancher=cluster-proportional-autoscaler=1.8.1.tar.gz",
"docker=rancher=local-path-provisioner=v0.0.19.tar.gz",
"docker=rancher=configmap-reload=v0.3.0-rancher4.tar.gz",
"docker=rancher=log-aggregator=v0.1.7.tar.gz",
"docker=rancher=coredns-coredns=1.8.0.tar.gz",
"docker=rancher=metrics-server=v0.4.1.tar.gz",
"docker=rancher=coreos-etcd=v3.4.14-rancher1.tar.gz",
"docker=rancher=nginx-ingress-controller-defaultbackend=1.5-rancher1.tar.gz",
"docker=rancher=coreos-flannel=v0.13.0-rancher1.tar.gz",
"docker=rancher=nginx-ingress-controller=nginx-0.43.0-rancher1.tar.gz",
"docker=rancher=coreos-kube-state-metrics=v1.9.7.tar.gz",
"docker=rancher=opa-gatekeeper=v3.1.0-beta.7.tar.gz",
"docker=rancher=coreos-prometheus-config-reloader=v0.39.0.tar.gz",
"docker=rancher=openzipkin-zipkin=2.14.2.tar.gz",
"docker=rancher=coreos-prometheus-operator=v0.39.0.tar.gz",
"docker=rancher=pause=3.2.tar.gz",
"docker=rancher=externalip-webhook=v0.1.6.tar.gz",
"docker=rancher=plugins-docker=18.09.tar.gz",
"docker=rancher=flannel-cni=v0.3.0-rancher6.tar.gz",
"docker=rancher=prom-alertmanager=v0.21.0.tar.gz",
"docker=rancher=fleet-agent=v0.3.4.tar.gz",
"docker=rancher=prometheus-auth=v0.2.1.tar.gz",
"docker=rancher=fleet=v0.3.4.tar.gz",
"docker=rancher=prom-node-exporter=v1.0.1.tar.gz",
"docker=rancher=fluentd=v0.1.24.tar.gz",
"docker=rancher=prom-prometheus=v2.18.2.tar.gz",
"docker=rancher=grafana-grafana=7.1.5.tar.gz",
"docker=rancher=rancher-agent=v2.5.7.tar.gz",
"docker=rancher=hyperkube=v1.20.4-rancher1.tar.gz",
"docker=rancher=rancher=v2.5.7.tar.gz",
"docker=rancher=istio-kubectl=1.5.10.tar.gz",
"docker=rancher=rancher-webhook=v0.1.0-beta9.tar.gz",
"docker=rancher=jimmidyson-configmap-reload=v0.3.0.tar.gz",
"docker=rancher=rke-tools=v0.1.72.tar.gz",
"docker=rancher=k8s-dns-dnsmasq-nanny=1.15.2.tar.gz",
"docker=rancher=security-scan=v0.1.14.tar.gz",
"docker=rancher=k8s-dns-kube-dns=1.15.2.tar.gz",
"docker=rancher=security-scan=v0.2.2.tar.gz",
"docker=rancher=k8s-dns-node-cache=1.15.13.tar.gz",
"docker=rancher=shell=v0.1.6.tar.gz",
"docker=rancher=k8s-dns-sidecar=1.15.2.tar.gz",
"docker=rancher=sonobuoy-sonobuoy=v0.16.3.tar.gz",
"docker=rancher=klipper-lb=v0.1.2.tar.gz",
"docker=rancher=system-upgrade-controller=v0.6.2.tar.gz",
}
test3 := []string{
"docker=bitnami=redis=6.2.6-debian-10-r0.tar.gz",
"docker=bitnami=redis=6.2.14-debian-11-r1.tar.gz",
"docker=bitnami=mysql=8.0.35-debian-11-r1.tar.gz",
"docker=bitnami=mysql=8.1.0-debian-11-r42.tar.gz",
"docker=simonrupf=chronyd=0.4.3.tar.gz",
"docker=bitnami=bitnami-shell=10-debian-10-r140.tar.gz",
"docker=bitnami=bitnami-shell=11-debian-11-r136.tar.gz",
"docker=bitnami=rabbitmq=3.9.12-debian-10-r3.tar.gz",
"docker=bitnami=rabbitmq=3.11.26-debian-11-r2.tar.gz",
"docker=ossrs=srs=v4.0.136.tar.gz",
"docker=ossrs=srs=v5.0.195.tar.gz",
"docker=ossrs=srs=v4.0-r3.tar.gz",
"docker=emqx=emqx=4.2.12.tar.gz",
"docker=emqx=emqx=5.5.1.tar.gz",
"docker=nacos=nacos-server=v2.1.2.tar.gz",
"docker=nacos=nacos-server=v2.1.2-slim.tar.gz",
"docker=library=mongo=5.0.tar.gz",
"docker=library=rabbitmq=3.9-management.tar.gz",
"docker=bitnami=minio=2022.5.4.tar.gz",
"docker=bitnami=minio=2023.5.4.tar.gz",
"docker=kubernetesui=dashboard=v2.0.1.tar.gz",
"docker=kubernetesui=metrics-scraper=v1.0.4.tar.gz",
"docker=library=nginx=1.21.3.tar.gz",
"docker=library=redis=6.0.20-alpine.tar.gz",
"docker=dyrnq=nfs-subdir-external-provisioner=v4.0.2.tar.gz",
"docker=jerrychina2020=rke-tools=v0.175-linux.tar.gz",
"docker=library=busybox=latest.tar.gz",
}
test = append(test, test2...)
test = append(test, test3...)
for _, s := range test {
gzipFileName := GzipFileNameToImageFullName(s)
fmt.Println(gzipFileName)
fullName := ImageNameToTargetImageFullName(gzipFileName, "10.250.0.100:8033")
fmt.Println(fullName)
fmt.Println("--------------------")
}
}
func TestImageGzipFileNameToImageFullName_1(t *testing.T) {
gzipFilePrefix := "/root/octopus_image/middle/"
filepath.WalkDir(gzipFilePrefix, func(path string, d os.DirEntry, err error) error {
if d == nil {
return nil
}
name := d.Name()
if strings.HasSuffix(name, ".tar.gz") {
fullName := GzipFileNameToImageFullName(name)
fmt.Println(fullName)
}
return nil
})
}
func TestImageGzipFileNameToImageFullName(t *testing.T) {
frontendMap := make(map[string]string)
backendMap := make(map[string]string)
srsMap := make(map[string]string)
gzipFilePrefix := "/root/octopus_image/middle/"
filepath.WalkDir(gzipFilePrefix, func(path string, d os.DirEntry, err error) error {
if d == nil {
return nil
}
name := d.Name()
if strings.HasSuffix(name, ".tar.gz") {
imageName, imageTag := GzipFileNameToImageNameAndTag(name)
if strings.Contains(imageName, "platform") {
frontendMap[imageName] = imageTag
} else if strings.Contains(imageName, "srs") {
srsMap[imageName] = imageTag
} else if strings.Contains(imageName, "operator") {
srsMap[imageName] = imageTag
} else {
backendMap[imageName] = imageTag
}
}
return nil
})
utils.BeautifulPrint(backendMap)
utils.BeautifulPrint(frontendMap)
utils.BeautifulPrint(srsMap)
}
func TestFrontendBackendImageMapFromCmiiImageMap(t *testing.T) {
frontendImageVersionMap, backendImageVersionMap, _ := FrontendBackendSrsImageMapFromCmiiImageMap(zjjt.CmiiImageMap)
utils.BeautifulPrint(frontendImageVersionMap)
utils.BeautifulPrint(backendImageVersionMap)
}
func TestImageFullNameToImageNameAndTag(t *testing.T) {
for _, s := range imageFullNameList {
imageName, imageTag := ImageFullNameToImageNameAndTag(s)
fmt.Printf("%-8s %-8s %-8s\n", imageName, imageTag, s)
}
}
func TestImageFullNameToAppName(t *testing.T) {
for _, s := range imageFullNameList {
imageName := ImageFullNameToAppName(s)
fmt.Printf("%-8s %-8s\n", imageName, s)
}
}
func TestCmiiImageMapFromImageFullNameList(t *testing.T) {
imageList := zhejianyidong_erjipingtai.Cmii570ImageList
cmiiImageVersionMap := CmiiImageMapFromImageFullNameList(imageList)
utils.BeautifulPrint(cmiiImageVersionMap)
}

View File

@@ -0,0 +1,85 @@
package logger
import (
"fmt"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// Logger struct represents a zap-based logger.
type Logger struct {
*zap.Logger
}
var Log, _ = NewLogger()
// NewLogger creates a new Logger instance.
func NewLogger() (*Logger, error) {
config := zap.Config{
Encoding: "json",
Level: zap.NewAtomicLevelAt(zap.DebugLevel),
OutputPaths: []string{"stdout"}, // 输出到控制台
ErrorOutputPaths: []string{"stderr"},
EncoderConfig: zapcore.EncoderConfig{
MessageKey: "message",
LevelKey: "level",
TimeKey: "time",
//CallerKey: "caller",
EncodeLevel: zapcore.CapitalLevelEncoder,
EncodeTime: zapcore.ISO8601TimeEncoder,
//EncodeCaller: zapcore.ShortCallerEncoder,
},
Development: true,
}
logger, err := config.Build()
if err != nil {
return nil, err
}
return &Logger{logger}, nil
}
func (l *Logger) Printf(msg string, args ...interface{}) {
l.Logger.Info(fmt.Sprintf("%s ==> %v", msg, args))
}
// Debug logs a debug message.
func (l *Logger) Debug(msg string, fields ...zap.Field) {
l.Logger.Debug(msg, fields...)
}
func (l *Logger) DebugF(msg string, args ...interface{}) {
l.Logger.Debug(fmt.Sprintf(msg, args...))
}
// Info logs an info message.
func (l *Logger) Info(msg string, fields ...zap.Field) {
l.Logger.Info(msg, fields...)
}
// InfoF logs an info message with format
func (l *Logger) InfoF(msg string, args ...interface{}) {
l.Logger.Info(fmt.Sprintf(msg, args...))
}
// Warn logs a warning message.
func (l *Logger) Warn(msg string, fields ...zap.Field) {
l.Logger.Warn(msg, fields...)
}
func (l *Logger) WarnF(msg string, args ...interface{}) {
l.Logger.Warn(fmt.Sprintf(msg, args...))
}
// Error logs an error message.
func (l *Logger) ErrorF(msg string, args ...interface{}) {
l.Logger.Error(fmt.Sprintf(msg, args...))
}
// Fatal logs a fatal message and exits the program with a non-zero status code.
func (l *Logger) Fatal(msg string, fields ...zap.Field) {
l.Logger.Fatal(msg, fields...)
}
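A minimal usage sketch for this logger (hypothetical caller; the import path wdd.io/agent-common/logger is assumed from agent-common/go.mod above):

```go
package main

import "wdd.io/agent-common/logger" // assumed import path under the agent-common module

func main() {
	log := logger.Log // package-level logger built by NewLogger
	log.InfoF("deploying %s to namespace %s", "cmii-uav-gateway", "dev")
	log.WarnF("retrying in %d seconds", 5)
	// Emits JSON lines on stdout, roughly:
	// {"level":"INFO","time":"...","message":"deploying cmii-uav-gateway to namespace dev"}
}
```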

View File

@@ -0,0 +1,79 @@
package pusher
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"wdd.io/agent-common/logger"
)
var log = logger.Log
// CmiiUpdateMessage message_pusher/cmii/CmiiMessage.go
type CmiiUpdateMessage struct {
Namespace string
AppName string
FromTag string
ToTag string
Replicas string
DeployStatus bool
}
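// With the values used in the test below, this marshals to (field names become
// the JSON keys, since no json tags are declared):
// {"Namespace":"dev","AppName":"cmii-uav-gateway","FromTag":"5.1.0","ToTag":"5.5.0","Replicas":"2","DeployStatus":false}
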
type Message struct { // TODO combine with server.message
ID string
Event string
Time int64
Topic string
Message string
Title string
Priority int
Tags []string
Click string
Icon string
// Additional fields
TopicURL string
SubscriptionID string
Raw string
}
func (c *CmiiUpdateMessage) SendMessage() (message Message) {
// Marshal the struct to JSON
requestBytes, err := json.Marshal(c)
if err != nil {
fmt.Println("Error encoding request body to JSON:", err)
return
}
url := "http://192.168.35.71:8080/cmii/update" // replace with the actual API address
req, err := http.NewRequest("POST", url, bytes.NewBuffer(requestBytes))
if err != nil {
fmt.Println("Error creating request:", err)
return
}
// Set the request header
req.Header.Set("Content-Type", "application/json")
// Send the request and read the response
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
fmt.Println("Error sending request:", err)
return
}
defer resp.Body.Close()
bodyBytes, _ := io.ReadAll(resp.Body)
var m Message
err = json.Unmarshal(bodyBytes, &m)
if err != nil {
log.ErrorF("Error unmarshaling response body to JSON: %s", err.Error())
return message
}
return m
}

View File

@@ -0,0 +1,21 @@
package pusher
import (
"testing"
"wdd.io/agent-common/utils"
)
func TestCmiiUpdateMessage_SendMessage(t *testing.T) {
c := &CmiiUpdateMessage{
Namespace: "dev",
AppName: "cmii-uav-gateway",
FromTag: "5.1.0",
ToTag: "5.5.0",
Replicas: "2",
DeployStatus: false,
}
utils.BeautifulPrint(c)
c.SendMessage()
}

View File

@@ -0,0 +1,493 @@
package real_project
var Cmii620ArmImageList = []string{
"harbor.cdcyy.com.cn/cmii/cmii-uav-integration:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-sync:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-open-gateway:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mission:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-notice:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-user:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-bridge:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-brain:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-tower:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-iot-dispatcher:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-developer:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-device:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-advanced5g:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-process:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-sky-converge:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-admin-user:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-fwdd:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-app-release:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cms:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-logger:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uavms-security-center:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uavms-platform-security-center:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-hljtt:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-pilot2-to-cloud:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qingdao:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-dispatchh5:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-uavms-platform-manager:6.2.0-szgz-arm",
"harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
"harbor.cdcyy.com.cn/cmii/srs:v5.0.195",
"harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL",
}
var Cmii620ImageList = []string{
"harbor.cdcyy.com.cn/cmii/cmii-open-gateway:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-integration:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-process:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-bridge:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-logger:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mission:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-app-release:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cms:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-developer:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-iot-dispatcher:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-device:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-sync:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-brain:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-notice:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-tower:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-advanced5g:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-user:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uavms-security-center:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-fwdd:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-admin-user:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-dispatchh5:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uavms-platform-security-center:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-pilot2-to-cloud:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-hljtt:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qingdao:6.2.0-demo",
"harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
"harbor.cdcyy.com.cn/cmii/srs:v5.0.195",
"harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL",
}
var Cmii611ImageList = []string{
"harbor.cdcyy.com.cn/cmii/cmii-uav-brain:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-bridge:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-logger:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-notice:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-app-release:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-developer:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-device:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uavms-security-center:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-admin-user:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cms:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-process:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-tower:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-sync:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-iot-dispatcher:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mission:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-fwdd:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-open-gateway:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-integration:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-user:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uavms-platform-security-center:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-pilot2-to-cloud:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-hljtt:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qingdao:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:6.1.1",
"harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
"harbor.cdcyy.com.cn/cmii/srs:v5.0.195",
"harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL",
}
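
// Cmii600ImageList enumerates the image set shipped with the 6.0.0 deployment
// (individual components may pin older tags).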
var Cmii600ImageList = []string{
"harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-ruoyi:2024102802",
"harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-user:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-brain:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-tower:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:5.2.0-24810",
"harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:6.0.0-snapshot-1026-db-confidence-bird",
"harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:6.0.0-31369-102401",
"harbor.cdcyy.com.cn/cmii/cmii-uav-logger:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:6.0.0-31369-yunnan-092402",
"harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-iam-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-device:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-integration:5.7.0-32108-0930",
"harbor.cdcyy.com.cn/cmii/cmii-uav-advanced5g:6.0.0-102001",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mission:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-notice:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-app-release:4.2.0-validation",
"harbor.cdcyy.com.cn/cmii/cmii-open-gateway:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-developer:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-process:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:5.2.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-user:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cms:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:6.0.0-102901",
"harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:4.2.0-beta",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:6.0.0-32443-102201",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-hljtt:5.7.0-hjltt",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:4.1.0-21377-0508",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-pilot2-to-cloud:6.0.0-092502",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:5.2.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:6.0.0-102301",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qingdao:5.7.0-29766-0815",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:6.0.0-master600",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:5.2.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-dispatchh5:5.6.0-0708",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:6.0.0-31981",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:6.0.0",
"harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA",
"harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
"harbor.cdcyy.com.cn/cmii/ossrs/srs:v5.0.195",
}
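
// Cmii570ImageList enumerates the image set shipped with the 5.7.0 deployment.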
var Cmii570ImageList = []string{
"harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-open-gateway:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-brain:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:5.7.0-29766-0815",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:5.2.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-iam-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:5.7.0-30403",
"harbor.cdcyy.com.cn/cmii/cmii-uav-tower:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:5.7.0-0805",
"harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-user:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:5.2.0-24810",
"harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:4.2.0-beta",
"harbor.cdcyy.com.cn/cmii/cmii-uav-logger:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mission:5.7.0-29766-0819",
"harbor.cdcyy.com.cn/cmii/cmii-admin-data:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-integration:5.7.0-hw-080201",
"harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-app-release:4.2.0-validation",
"harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:5.7.0-0814",
"harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:5.7.0-31369-yunnan-082001",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:5.7.0-29766-0815",
"harbor.cdcyy.com.cn/cmii/cmii-uav-notice:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-developer:5.7.0-0725",
"harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cms:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-process:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-user:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-device:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:5.2.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-dispatchh5:5.6.0-0708",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:5.2.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:5.7.0-0820",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:4.1.0-21377-0508",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:5.7.0-29322",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.7.0-29267-0820",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-hljtt:5.3.0-hjltt",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:5.7.0-29322",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qingdao:5.7.0-29766-0815",
"harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA",
"harbor.cdcyy.com.cn/cmii/ossrs/srs:v5.0.195",
"harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
}
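
// Cmii580ImageList enumerates the image set shipped with the 5.8.0 deployment.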
var Cmii580ImageList = []string{
"harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-process:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-open-gateway:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-tower:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-user:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-user:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-iam-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:5.2.0-24810",
"harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:5.7.0-31369-yunnan-082702",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-integration:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:4.2.0-beta",
"harbor.cdcyy.com.cn/cmii/cmii-uav-brain:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-data:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-device:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mission:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cms:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-app-release:4.2.0-validation",
"harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:5.7.0-30403",
"harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:5.2.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-notice:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-developer:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-logger:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:5.2.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-dispatchh5:5.6.0-0708",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:5.7.0-29322",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:5.2.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qingdao:5.7.0-29766-0815",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-hljtt:5.7.0-hjltt",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:5.7.0-29322",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:5.8.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:4.1.0-21377-0508",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:5.7.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:5.8.0",
"harbor.cdcyy.com.cn/cmii/ossrs/srs:v5.0.195",
"harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA",
"harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
}
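
A minimal Go sketch of how these lists can be consumed — for example, pre-pulling every image onto a node before an offline installation. It assumes the Docker CLI is available on the host (containerd-based nodes would use `ctr` or `crictl` instead), and `imageList` is an illustrative stand-in for one of the lists above:

```go
package main

import (
	"fmt"
	"os/exec"
)

// imageList stands in for one of the generated lists above,
// e.g. Cmii580ImageList.
var imageList = []string{
	"harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:5.8.0",
	"harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
}

func main() {
	for _, img := range imageList {
		// Shell out to `docker pull`; report errors but keep
		// pulling the remaining images.
		out, err := exec.Command("docker", "pull", img).CombinedOutput()
		if err != nil {
			fmt.Printf("pull %s failed: %v\n%s", img, err, out)
			continue
		}
		fmt.Printf("pulled %s\n", img)
	}
}
```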

View File

@@ -0,0 +1,140 @@
package bgtg
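
// AllCmiiImageTagList enumerates bare image:tag pairs, without the
// registry prefix used by the fully qualified lists below.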
var AllCmiiImageTagList = []string{
"cmii-uav-tower:5.4.0-0319",
"cmii-uav-platform-logistics:5.4.0",
"cmii-uav-platform-qinghaitourism:4.1.0-21377-0508",
"cmii-uav-platform-securityh5:5.4.0",
"cmii-uav-platform:5.4.0-25263-041102",
"cmii-uav-platform-ai-brain:5.4.0",
"cmii-uav-emergency:5.3.0",
"cmii-uav-kpi-monitor:5.4.0",
"cmii-uav-platform-splice:5.4.0-040301",
"cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427",
"cmii-live-operator:5.2.0",
"cmii-uav-gateway:5.4.0",
"cmii-uav-platform-security:4.1.6",
"cmii-uav-integration:5.4.0-25916",
"cmii-uav-notice:5.4.0",
"cmii-uav-platform-open:5.4.0",
"cmii-srs-oss-adaptor:2023-SA",
"cmii-admin-gateway:5.4.0",
"cmii-uav-process:5.4.0-0410",
"cmii-suav-supervision:5.4.0-032501",
"cmii-uav-platform-cms-portal:5.4.0",
"cmii-uav-platform-multiterminal:5.4.0",
"cmii-admin-data:5.4.0-0403",
"cmii-uav-cloud-live:5.4.0",
"cmii-uav-grid-datasource:5.2.0-24810",
"cmii-uav-platform-qingdao:4.1.6-24238-qingdao",
"cmii-admin-user:5.4.0",
"cmii-uav-industrial-portfolio:5.4.0-28027-041102",
"cmii-uav-alarm:5.4.0-0409",
"cmii-uav-clusters:5.2.0",
"cmii-uav-platform-oms:5.4.0",
"cmii-uav-platform-hljtt:5.3.0-hjltt",
"cmii-uav-platform-mws:5.4.0",
"cmii-uav-autowaypoint:4.1.6-cm",
"cmii-uav-grid-manage:5.1.0",
"cmii-uav-platform-share:5.4.0",
"cmii-uav-cms:5.3.0",
"cmii-uav-oauth:5.4.0-032901",
"cmii-open-gateway:5.4.0",
"cmii-uav-data-post-process:5.4.0",
"cmii-uav-multilink:5.4.0-032701",
"cmii-uav-platform-media:5.4.0",
"cmii-uav-platform-visualization:5.2.0",
"cmii-uav-platform-emergency-rescue:5.2.0",
"cmii-app-release:4.2.0-validation",
"cmii-uav-device:5.4.0-28028-0409",
"cmii-uav-gis-server:5.4.0",
"cmii-uav-brain:5.4.0",
"cmii-uav-depotautoreturn:5.4.0",
"cmii-uav-threedsimulation:5.1.0",
"cmii-uav-grid-engine:5.1.0",
"cmii-uav-developer:5.4.0-040701",
"cmii-uav-waypoint:5.4.0-032901",
"cmii-uav-platform-base:5.4.0",
"cmii-uav-platform-threedsimulation:5.2.0-21392",
"cmii-uav-platform-detection:5.4.0",
"cmii-uav-logger:5.4.0-0319",
"cmii-uav-platform-seniclive:5.2.0",
"cmii-suav-platform-supervisionh5:5.4.0",
"cmii-uav-user:5.4.0",
"cmii-uav-surveillance:5.4.0-28028-0409",
"cmii-uav-mission:5.4.0-28028-041006",
"cmii-uav-mqtthandler:5.4.0-25916-041001",
"cmii-uav-material-warehouse:5.4.0-0407",
"cmii-uav-platform-armypeople:5.4.0-041201",
"cmii-suav-platform-supervision:5.4.0",
"cmii-uav-airspace:5.4.0-0402",
}
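
// AllCMiiImageFullNameList560 enumerates fully qualified image names for
// the 5.6.0 deployment.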
var AllCMiiImageFullNameList560 = []string{
"harbor.cdcyy.com.cn/cmii/cmii-uav-integration:5.7.0-30015-29835-071601",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mission:5.5.0-30015-061801",
"harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:5.6.0-0716",
"harbor.cdcyy.com.cn/cmii/cmii-admin-user:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:5.6.0-061202",
"harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:5.6.0-062401",
"harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:5.6.0-062602",
"harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-tower:5.6.0-062601",
"harbor.cdcyy.com.cn/cmii/cmii-uav-user:5.6.0-0704",
"harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-developer:5.6.0-0708",
"harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:5.6.0-0704",
"harbor.cdcyy.com.cn/cmii/cmii-uav-notice:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-process:5.6.0-060601",
"harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:5.6.0-30015-070801",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-device:5.6.0-0715",
"harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:5.6.0-071601",
"harbor.cdcyy.com.cn/cmii/cmii-open-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-brain:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-cms:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:5.6.0-0704",
"harbor.cdcyy.com.cn/cmii/cmii-uav-logger:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-iam-gateway:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:5.2.0-24810",
"harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:5.1.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:5.6.0-30067-071604",
"harbor.cdcyy.com.cn/cmii/cmii-admin-data:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:5.6.0-0704",
"harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:5.5.0",
"harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:5.6.0-070401",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:5.6.0-0710",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-dispatchh5:5.6.0-0708",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:5.6.0-0704",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.6.0-29267-0717",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:5.4.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:5.6.0-0708",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:5.6.0-0709",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:5.6.0-0709",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:4.1.0-21377-0508",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:5.6.0-28028-071102",
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:5.6.0",
"harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
"harbor.cdcyy.com.cn/cmii/ossrs/srs:v5.0.195",
"harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA",
}
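
Unlike the `AllCMiiImageFullNameList560` entries, `AllCmiiImageTagList` stores bare `name:tag` pairs with no registry prefix. A sketch normalizing both forms; `registryPrefix` is an assumption taken from the fully qualified lists:

```go
package main

import (
	"fmt"
	"strings"
)

// registryPrefix is assumed from the fully qualified lists above;
// adjust it to the target Harbor project as needed.
const registryPrefix = "harbor.cdcyy.com.cn/cmii/"

// fullName prepends the registry prefix to bare tags and leaves
// already-qualified names untouched.
func fullName(tag string) string {
	if strings.Contains(tag, "/") {
		return tag // already fully qualified
	}
	return registryPrefix + tag
}

func main() {
	for _, t := range []string{"cmii-uav-tower:5.4.0-0319", "cmii-live-operator:5.2.0"} {
		fmt.Println(fullName(t))
	}
}
```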

File diff suppressed because it is too large

View File

@@ -0,0 +1,420 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "bjtg",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
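
The `tenant-prefix-*` ConfigMaps above differ only in the name suffix, `ApplicationShortName`, and `AppClientId`, so they lend themselves to generation from a single template. A sketch using `text/template`; the `tenantApp` type and sample entries are hypothetical:

```go
package main

import (
	"os"
	"text/template"
)

// tenantApp mirrors the fields that vary between the
// tenant-prefix-* ConfigMaps above.
type tenantApp struct {
	Short    string // ApplicationShortName and ConfigMap name suffix
	ClientID string // AppClientId, "empty" when unassigned
}

const cmTmpl = `---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-{{.Short}}
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "bjtg",
      CloudHOST: "10.250.0.200:8888",
      ApplicationShortName: "{{.Short}}",
      AppClientId: "{{.ClientID}}"
    }
`

func main() {
	t := template.Must(template.New("cm").Parse(cmTmpl))
	apps := []tenantApp{
		{Short: "media", ClientID: "APP_4AU8lbifESQO4FD6"},
		{Short: "uas", ClientID: "empty"},
	}
	for _, a := range apps {
		if err := t.Execute(os.Stdout, a); err != nil {
			panic(err)
		}
	}
}
```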

View File

@@ -0,0 +1,309 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 30554
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [ "" ]
resources: [ "secrets" ]
resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf" ]
verbs: [ "get", "update", "delete" ]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [ "" ]
resources: [ "configmaps" ]
resourceNames: [ "kubernetes-dashboard-settings" ]
verbs: [ "get", "update" ]
# Allow Dashboard to get metrics.
- apiGroups: [ "" ]
resources: [ "services" ]
resourceNames: [ "heapster", "dashboard-metrics-scraper" ]
verbs: [ "proxy" ]
- apiGroups: [ "" ]
resources: [ "services/proxy" ]
resourceNames: [ "heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper" ]
verbs: [ "get" ]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: [ "metrics.k8s.io" ]
resources: [ "pods", "nodes" ]
verbs: [ "get", "list", "watch" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: kubernetes-dashboard
image: 10.250.0.200:8033/cmii/dashboard:v2.0.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: { }
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: 10.250.0.200:8033/cmii/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: { }
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
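
Logging in to the dashboard exposed on NodePort 30554 requires a bearer token for the `admin-user` ServiceAccount bound above. A sketch that shells out to kubectl; note that `kubectl create token` requires kubectl v1.24+, and on older clusters the token must instead be read from the ServiceAccount's secret:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// `kubectl create token` exists in kubectl v1.24+; on older
	// clusters, extract the token from the admin-user secret instead.
	out, err := exec.Command("kubectl", "-n", "kube-system",
		"create", "token", "admin-user").Output()
	if err != nil {
		fmt.Println("token request failed:", err)
		return
	}
	fmt.Printf("dashboard bearer token: %s\n", out)
}
```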

View File

@@ -0,0 +1,274 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: bjtg
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: bjtg
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
data:
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
EMQX_CLUSTER__K8S__NAMESPACE: "bjtg"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: bjtg
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
data:
emqx_auth_mnesia.conf: |-
auth.mnesia.password_hash = sha256
# clientid authentication entries
# auth.client.1.clientid = admin
# auth.client.1.password = 4YPk*DS%+5
## username authentication entries
auth.user.1.username = admin
auth.user.1.password = odD8#Ve7.B
auth.user.2.username = cmlc
auth.user.2.password = odD8#Ve7.B
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
{emqx_auth_mnesia,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: bjtg
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
affinity: { }
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: 10.250.0.200:8033/cmii/emqx:4.4.9
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: { }
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf"
subPath: emqx_auth_mnesia.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_mnesia.conf
path: emqx_auth_mnesia.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: bjtg
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: bjtg
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: bjtg
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: bjtg
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: bjtg
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
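
With `auth.mnesia.password_hash = sha256` and no salt configured, emqx_auth_mnesia compares credentials against plain SHA-256 digests. A sketch of that (assumed unsalted) scheme:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hashPassword mirrors the unsalted sha256 scheme selected by
// auth.mnesia.password_hash in the ConfigMap above.
func hashPassword(plain string) string {
	sum := sha256.Sum256([]byte(plain))
	return hex.EncodeToString(sum[:])
}

func main() {
	// Example only; the real credential comes from the ConfigMap.
	fmt.Println(hashPassword("odD8#Ve7.B"))
}
```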

File diff suppressed because it is too large

View File

@@ -0,0 +1,604 @@
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: bjtg
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/supervisionh5)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/dispatchh5)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/hljtt)$ $1/ redirect;
rewrite ^(/jiangsuwenlv)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/multiterminal)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/qingdao)$ $1/ redirect;
rewrite ^(/qinghaitourism)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/securityh5)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/threedsimulation)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/uas)$ $1/ redirect;
rewrite ^(/uasms)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
spec:
rules:
- host: fake-domain.bjtg.io
http:
paths:
- path: /bjtg/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /bjtg/supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /bjtg/supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /bjtg/pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /bjtg/ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /bjtg/armypeople/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /bjtg/base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /bjtg/cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /bjtg/detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /bjtg/dispatchh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dispatchh5
servicePort: 9528
- path: /bjtg/emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /bjtg/hljtt/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hljtt
servicePort: 9528
- path: /bjtg/jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /bjtg/logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /bjtg/media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /bjtg/multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-multiterminal
servicePort: 9528
- path: /bjtg/mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /bjtg/oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /bjtg/open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /bjtg/qingdao/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qingdao
servicePort: 9528
- path: /bjtg/qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /bjtg/security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-security
servicePort: 9528
- path: /bjtg/securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /bjtg/seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /bjtg/share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /bjtg/splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /bjtg/threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-threedsimulation
servicePort: 9528
- path: /bjtg/traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-traffic
servicePort: 9528
- path: /bjtg/uas/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uas
servicePort: 9528
- path: /bjtg/uasms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uasms
servicePort: 9528
- path: /bjtg/visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: bjtg
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: cmii-admin-data.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-data
servicePort: 8080
- host: cmii-admin-gateway.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- host: cmii-admin-user.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-user
servicePort: 8080
- host: cmii-app-release.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-app-release
servicePort: 8080
- host: cmii-open-gateway.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- host: cmii-suav-supervision.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-supervision
servicePort: 8080
- host: cmii-uas-gateway.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- host: cmii-uas-lifecycle.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-lifecycle
servicePort: 8080
- host: cmii-uav-airspace.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-airspace
servicePort: 8080
- host: cmii-uav-alarm.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-alarm
servicePort: 8080
- host: cmii-uav-autowaypoint.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-autowaypoint
servicePort: 8080
- host: cmii-uav-brain.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-brain
servicePort: 8080
- host: cmii-uav-bridge.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-bridge
servicePort: 8080
- host: cmii-uav-cloud-live.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cloud-live
servicePort: 8080
- host: cmii-uav-clusters.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-clusters
servicePort: 8080
- host: cmii-uav-cms.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cms
servicePort: 8080
- host: cmii-uav-data-post-process.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-data-post-process
servicePort: 8080
- host: cmii-uav-depotautoreturn.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-depotautoreturn
servicePort: 8080
- host: cmii-uav-developer.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-developer
servicePort: 8080
- host: cmii-uav-device.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-device
servicePort: 8080
- host: cmii-uav-emergency.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-emergency
servicePort: 8080
- host: cmii-uav-gateway.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- host: cmii-uav-gis-server.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gis-server
servicePort: 8080
- host: cmii-uav-grid-datasource.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-datasource
servicePort: 8080
- host: cmii-uav-grid-engine.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-engine
servicePort: 8080
- host: cmii-uav-grid-manage.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-manage
servicePort: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-industrial-portfolio
servicePort: 8080
- host: cmii-uav-integration.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-integration
servicePort: 8080
- host: cmii-uav-kpi-monitor.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-kpi-monitor
servicePort: 8080
- host: cmii-uav-logger.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-logger
servicePort: 8080
- host: cmii-uav-material-warehouse.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-material-warehouse
servicePort: 8080
- host: cmii-uav-mission.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mission
servicePort: 8080
- host: cmii-uav-mqtthandler.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mqtthandler
servicePort: 8080
- host: cmii-uav-multilink.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-multilink
servicePort: 8080
- host: cmii-uav-notice.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-notice
servicePort: 8080
- host: cmii-uav-oauth.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-oauth
servicePort: 8080
- host: cmii-uav-process.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-sense-adapter.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sense-adapter
servicePort: 8080
- host: cmii-uav-surveillance.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-surveillance
servicePort: 8080
- host: cmii-uav-threedsimulation.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-threedsimulation
servicePort: 8080
- host: cmii-uav-tower.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tower
servicePort: 8080
- host: cmii-uav-user.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-user
servicePort: 8080
- host: cmii-uav-waypoint.uavcloud-bjtg.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-waypoint
servicePort: 8080
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: bjtg
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
spec:
rules:
- host: fake-domain.bjtg.io
http:
paths:
- path: /bjtg/oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /bjtg/open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /bjtg/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
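
All three Ingresses rely on capture-group rewriting: the request path is matched against a `?(.*)` suffix and the upstream receives `/$1`. A Go sketch reproducing the gateway rule's effect, handy for checking what `cmii-uav-gateway` actually sees:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Mirrors the ingress rule: path /bjtg/api/?(.*) with
	// rewrite-target /$1 strips the tenant and api prefix.
	re := regexp.MustCompile(`^/bjtg/api/?(.*)$`)
	for _, p := range []string{"/bjtg/api/uav/v1/devices", "/bjtg/api"} {
		if m := re.FindStringSubmatch(p); m != nil {
			fmt.Printf("%s -> /%s\n", p, m[1])
		}
	}
}
```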

View File

@@ -0,0 +1,78 @@
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
type: NodePort
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
nodePort: 37017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: { }
containers:
- name: helm-mongo
image: 10.250.0.200:8033/cmii/mongo:5.0
resources: { }
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
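
Note that the root password above contains `#`, which terminates a MongoDB URI unless percent-encoded. A sketch building a correctly escaped connection string; the NodePort address (`10.250.0.200:37017`) is inferred from the Service above:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// url.URL percent-encodes the userinfo, so the '#' in the
	// password becomes %23 instead of truncating the URI.
	u := url.URL{
		Scheme: "mongodb",
		User:   url.UserPassword("cmlc", "REdPza8#oVlt"),
		Host:   "10.250.0.200:37017", // NodePort from the Service above
	}
	fmt.Println(u.String())
}
```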

View File

@@ -0,0 +1,423 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: bjtg
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
annotations: { }
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: bjtg
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: bjtg
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: bjtg
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
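  # NOTE: the Bitnami MySQL entrypoint executes files mounted under
  # /docker-entrypoint-initdb.d only on first start, while /bitnami/mysql/data is
  # still empty; editing this ConfigMap later will not re-run these grants.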
create_users_grants_core.sql: |-
create
user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create
user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all
on *.* to zyly_qc@'%';
create
user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
grant all
on *.* to k8s_admin@'%';
create
user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all
on *.* to audit_dba@'%';
create
user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT
SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT
on *.* to db_backup@'%';
create
user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION
CLIENT on *.* to monitor@'%';
flush
privileges;
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: bjtg
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: bjtg
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: bjtg
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: bjtg
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: { }
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: bjtg
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: bjtg
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: { }
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: bjtg
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: bjtg
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: bjtg
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: { }
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: 10.250.0.200:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: 10.250.0.200:8033/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: { }
requests: { }
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/bjtg/
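            # NOTE: hostPath storage pins the MySQL data to whichever node matches
            # the mysql-deploy=true nodeSelector above; if the pod is rescheduled
            # to another node the data directory does not move with it.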

View File

@@ -0,0 +1,130 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.6.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
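  # NOTE: Nacos does not create its own tables; the cmii_nacos_config database must
  # be pre-populated with the MySQL DDL script shipped with the nacos-server release
  # (nacos-mysql.sql / mysql-schema.sql, depending on version) before this pod starts.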
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.6.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38848
- port: 9848
name: server12
targetPort: 9848
- port: 9849
name: server23
targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.6.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 5.6.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: { }
containers:
- name: nacos-server
image: 10.250.0.200:8033/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---

View File

@@ -0,0 +1,38 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: test-pod
image: 10.250.0.200:8033/cmii/busybox:latest
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
        claimName: test-claim # must match the PVC name above

View File

@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # set to the namespace the provisioner is deployed in; same below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [ "" ]
resources: [ "persistentvolumes" ]
verbs: [ "get", "list", "watch", "create", "delete" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumeclaims" ]
verbs: [ "get", "list", "watch", "update" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "update", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment below
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # must match the namespace used in the RBAC objects above
spec:
imagePullSecrets:
- name: harborsecret
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 10.250.0.200:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value:
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server:
path: /var/lib/docker/nfs_data
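          # NOTE: for each PVC the provisioner creates a subdirectory under this
          # export named ${namespace}-${pvcName}-${pvName}; with archiveOnDelete
          # set to "false" in the StorageClass above, the directory is removed
          # when the PVC is deleted.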

View File

@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi

View File

@@ -0,0 +1,328 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
      targetPort: dashboard
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: bjtg
publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 36675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: bjtg
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: bjtg
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: { }
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: 10.250.0.200:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: { }
requests: { }
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: 10.250.0.200:8033/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: { }
requests: { }
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq
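        # NOTE: the helm-rabbitmq PVC referenced here is defined in the
        # all-persistence-volume-claims manifest earlier in this commit
        # (nfs-prod-distribute, ReadWriteMany, 20Gi).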

View File

@@ -0,0 +1,585 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
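    # Treat a "LOADING ..." reply as alive: an instance replaying a large AOF at
    # startup is healthy but not yet ready, so liveness must not kill it.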
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: bjtg
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: bjtg
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: bjtg
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: bjtg
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: { }
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 10.250.0.200:8033/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "100m"
memory: 1Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: tmp
emptyDir: { }
- name: redis-data
emptyDir: { }
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: bjtg
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 10.250.0.200:8033/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.bjtg.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "100m"
memory: 1Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: redis-data
emptyDir: { }

View File

@@ -0,0 +1,496 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: bjtg
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 30935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://10.250.0.200:8888;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
nodePort: 30935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30557
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: 10.250.0.200:8033/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 30935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 10.250.0.200
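            # NOTE: SRS expands $CANDIDATE in rtc_server.candidate (see the
            # ConfigMap above) from this env var; it must be an address WebRTC
            # clients can actually reach, here the node IP 10.250.0.200.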
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: bjtg/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: bjtg/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: 10.250.0.200:8033/cmii/cmii-srs-oss-adaptor:2023-SA
env:
- name: OSS_ENDPOINT
value: 'http://:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: bjtg/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-op section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: 10.250.0.200:8033/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 5.6.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: developer
password: N@cos14Good
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 5.6.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 5.6.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://10.250.0.200:30935'
rtsp: 'rtsp://10.250.0.200:30554'
srt: 'srt://10.250.0.200:30556'
flv: 'http://10.250.0.200:30500'
hls: 'http://10.250.0.200:30500'
rtc: 'webrtc://10.250.0.200:30090'
replay: 'https://10.250.0.200:30333'
minio:
endpoint: http://:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,420 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
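# NOTE: each tenant-prefix-* ConfigMap below follows the same shape; the frontend
# pods presumably mount ingress-config.js to pick up the tenant host and the
# per-application AppClientId ("empty" where no client binding is required).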
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: bjtg
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "demo",
CloudHOST: "10.250.0.200:8888",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}

View File

@@ -0,0 +1,309 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
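# NOTE: kube-system always exists in a running cluster, so applying this
# Namespace object is a harmless no-op kept for manifest completeness.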
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 30554
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [ "" ]
resources: [ "secrets" ]
resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf" ]
verbs: [ "get", "update", "delete" ]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [ "" ]
resources: [ "configmaps" ]
resourceNames: [ "kubernetes-dashboard-settings" ]
verbs: [ "get", "update" ]
# Allow Dashboard to get metrics.
- apiGroups: [ "" ]
resources: [ "services" ]
resourceNames: [ "heapster", "dashboard-metrics-scraper" ]
verbs: [ "proxy" ]
- apiGroups: [ "" ]
resources: [ "services/proxy" ]
resourceNames: [ "heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper" ]
verbs: [ "get" ]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: [ "metrics.k8s.io" ]
resources: [ "pods", "nodes" ]
verbs: [ "get", "list", "watch" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: kubernetes-dashboard
image: 10.250.0.200:8033/cmii/dashboard:v2.0.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: { }
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: 10.250.0.200:8033/cmii/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: { }
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
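
To sign in to the dashboard exposed on NodePort 30554, the bearer token of the `admin-user` ServiceAccount bound to `cluster-admin` above can be read back from its token Secret. A minimal sketch, assuming a pre-1.24 cluster where ServiceAccount token Secrets are auto-created:

```bash
# Find the auto-created token Secret for the admin-user ServiceAccount
SECRET=$(kubectl -n kube-system get secret | awk '/^admin-user-token/{print $1}')
# Print the bearer token to paste into the dashboard login page
kubectl -n kube-system get secret "$SECRET" -o jsonpath='{.data.token}' | base64 -d; echo
# The dashboard itself answers at https://<node-ip>:30554/
```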

---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: bjtg
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: bjtg
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
data:
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
  EMQX_CLUSTER__K8S__NAMESPACE: "bjtg"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: bjtg
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
data:
emqx_auth_mnesia.conf: |-
auth.mnesia.password_hash = sha256
    # clientid auth entries
    # auth.client.1.clientid = admin
    # auth.client.1.password = 4YPk*DS%+5
    ## username auth entries
auth.user.1.username = admin
auth.user.1.password = odD8#Ve7.B
auth.user.2.username = cmlc
auth.user.2.password = odD8#Ve7.B
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
    {emqx_auth_mnesia,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: bjtg
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
affinity: { }
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: 10.250.0.200:8033/cmii/emqx:4.4.9
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: { }
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf"
subPath: emqx_auth_mnesia.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_mnesia.conf
path: emqx_auth_mnesia.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: bjtg
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: bjtg
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: bjtg
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: bjtg
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: bjtg
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
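
A quick end-to-end check of the mnesia-based authentication configured above (a sketch; assumes the mosquitto clients are installed and `<node-ip>` is any cluster node):

```bash
# Publish as a user defined in emqx_auth_mnesia.conf, via the MQTT NodePort 31883
mosquitto_pub -h <node-ip> -p 31883 -u admin -P 'odD8#Ve7.B' -t 'admin/test' -m 'hello'
# Anonymous access should be rejected (EMQX_ALLOW_ANONYMOUS is "false")
mosquitto_pub -h <node-ip> -p 31883 -t 'admin/test' -m 'hello'   # expect: not authorised
# The EMQX dashboard is exposed on NodePort 38085
```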

---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: bjtg
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/supervisionh5)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/dispatchh5)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/hljtt)$ $1/ redirect;
rewrite ^(/jiangsuwenlv)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/multiterminal)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/qingdao)$ $1/ redirect;
rewrite ^(/qinghaitourism)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/securityh5)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/threedsimulation)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/uas)$ $1/ redirect;
rewrite ^(/uasms)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
spec:
rules:
- host: fake-domain.bjtg.io
http:
paths:
- path: /demo/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /demo/supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /demo/supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /demo/pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /demo/ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /demo/armypeople/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /demo/base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /demo/cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /demo/detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /demo/dispatchh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dispatchh5
servicePort: 9528
- path: /demo/emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /demo/hljtt/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hljtt
servicePort: 9528
- path: /demo/jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /demo/logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /demo/media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /demo/multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-multiterminal
servicePort: 9528
- path: /demo/mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /demo/oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /demo/open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /demo/qingdao/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qingdao
servicePort: 9528
- path: /demo/qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /demo/security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-security
servicePort: 9528
- path: /demo/securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /demo/seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /demo/share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /demo/splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /demo/threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-threedsimulation
servicePort: 9528
- path: /demo/traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-traffic
servicePort: 9528
- path: /demo/uas/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uas
servicePort: 9528
- path: /demo/uasms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uasms
servicePort: 9528
- path: /demo/visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: bjtg
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: cmii-admin-data.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-data
servicePort: 8080
- host: cmii-admin-gateway.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- host: cmii-admin-user.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-user
servicePort: 8080
- host: cmii-app-release.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-app-release
servicePort: 8080
- host: cmii-open-gateway.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- host: cmii-suav-supervision.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-supervision
servicePort: 8080
- host: cmii-uas-gateway.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- host: cmii-uas-lifecycle.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-lifecycle
servicePort: 8080
- host: cmii-uav-airspace.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-airspace
servicePort: 8080
- host: cmii-uav-alarm.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-alarm
servicePort: 8080
- host: cmii-uav-autowaypoint.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-autowaypoint
servicePort: 8080
- host: cmii-uav-brain.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-brain
servicePort: 8080
- host: cmii-uav-bridge.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-bridge
servicePort: 8080
- host: cmii-uav-cloud-live.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cloud-live
servicePort: 8080
- host: cmii-uav-clusters.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-clusters
servicePort: 8080
- host: cmii-uav-cms.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cms
servicePort: 8080
- host: cmii-uav-data-post-process.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-data-post-process
servicePort: 8080
- host: cmii-uav-depotautoreturn.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-depotautoreturn
servicePort: 8080
- host: cmii-uav-developer.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-developer
servicePort: 8080
- host: cmii-uav-device.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-device
servicePort: 8080
- host: cmii-uav-emergency.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-emergency
servicePort: 8080
- host: cmii-uav-gateway.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- host: cmii-uav-gis-server.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gis-server
servicePort: 8080
- host: cmii-uav-grid-datasource.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-datasource
servicePort: 8080
- host: cmii-uav-grid-engine.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-engine
servicePort: 8080
- host: cmii-uav-grid-manage.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-manage
servicePort: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-industrial-portfolio
servicePort: 8080
- host: cmii-uav-integration.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-integration
servicePort: 8080
- host: cmii-uav-kpi-monitor.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-kpi-monitor
servicePort: 8080
- host: cmii-uav-logger.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-logger
servicePort: 8080
- host: cmii-uav-material-warehouse.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-material-warehouse
servicePort: 8080
- host: cmii-uav-mission.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mission
servicePort: 8080
- host: cmii-uav-mqtthandler.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mqtthandler
servicePort: 8080
- host: cmii-uav-multilink.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-multilink
servicePort: 8080
- host: cmii-uav-notice.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-notice
servicePort: 8080
- host: cmii-uav-oauth.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-oauth
servicePort: 8080
- host: cmii-uav-process.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-sense-adapter.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sense-adapter
servicePort: 8080
- host: cmii-uav-surveillance.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-surveillance
servicePort: 8080
- host: cmii-uav-threedsimulation.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-threedsimulation
servicePort: 8080
- host: cmii-uav-tower.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tower
servicePort: 8080
- host: cmii-uav-user.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-user
servicePort: 8080
- host: cmii-uav-waypoint.uavcloud-demo.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-waypoint
servicePort: 8080
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: bjtg
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
spec:
rules:
- host: fake-domain.bjtg.io
http:
paths:
- path: /demo/oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /demo/open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /demo/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
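
Because of `rewrite-target: /$1`, a request to `/demo/api/<rest>` reaches `cmii-uav-gateway` with the path `/<rest>`. A curl sketch against the NGINX ingress controller (`/actuator/health` is a hypothetical backend path, used only for illustration):

```bash
# Send the Host header the rules match on; <ingress-ip> is the ingress controller address
curl -H 'Host: fake-domain.bjtg.io' "http://<ingress-ip>/demo/api/actuator/health"
# The gateway sees the request as GET /actuator/health (the /demo/api/ prefix is stripped)
```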

---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
type: NodePort
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
nodePort: 37017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.6.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: { }
containers:
- name: helm-mongo
image: 10.250.0.200:8033/cmii/mongo:5.0
resources: { }
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
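
Connectivity to the MongoDB NodePort can be verified with the root credentials from the StatefulSet env (a sketch; `mongosh` also ships inside the mongo:5.0 image, so the same check can run via `kubectl exec`):

```bash
mongosh --host <node-ip> --port 37017 -u cmlc -p 'REdPza8#oVlt' \
  --authenticationDatabase admin --eval 'db.runCommand({ ping: 1 })'
```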

---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: bjtg
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
annotations: { }
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: bjtg
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: bjtg
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
    innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: bjtg
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
    create user zyly@'%' identified by 'Cmii@451315';
    grant select on *.* to zyly@'%';
    create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
    grant all on *.* to zyly_qc@'%';
    create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
    grant all on *.* to k8s_admin@'%';
    create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
    grant all on *.* to audit_dba@'%';
    create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
    GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
    create user monitor@'%' identified by 'PL3#nGtrWbf-';
    grant REPLICATION CLIENT on *.* to monitor@'%';
    flush privileges;
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: bjtg
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: bjtg
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: bjtg
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: bjtg
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: { }
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: bjtg
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: bjtg
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: { }
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: bjtg
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: bjtg
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: bjtg
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: { }
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: 10.250.0.200:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: 10.250.0.200:8033/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: { }
requests: { }
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/bjtg/
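
The root password lives base64-encoded in the `helm-mysql` Secret; a sketch for decoding it and connecting through NodePort 33306 (the init script above also creates `k8s_admin` and the other users on first start):

```bash
# Decode the root password from the Secret
MYSQL_ROOT_PASSWORD=$(kubectl -n bjtg get secret helm-mysql \
  -o jsonpath='{.data.mysql-root-password}' | base64 -d)
# Connect through the NodePort service and confirm the init users exist
mysql -h <node-ip> -P 33306 -uroot -p"$MYSQL_ROOT_PASSWORD" \
  -e "select user, host from mysql.user;"
```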

---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.6.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.6.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38848
- port: 9848
name: server12
targetPort: 9848
- port: 9849
name: server23
targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: bjtg
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.6.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 5.6.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: { }
containers:
- name: nacos-server
image: 10.250.0.200:8033/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
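
Nacos persists its configuration in the `cmii_nacos_config` MySQL database referenced by the ConfigMap, so that schema must exist before the pod starts. A minimal reachability sketch via the NodePort (`/nacos` is the default console path of nacos-server):

```bash
# Console should answer on NodePort 38848
curl -fsS "http://<node-ip>:38848/nacos/" >/dev/null && echo "nacos console up"
# Readiness endpoint of the console API (available in recent nacos-server releases)
curl -s "http://<node-ip>:38848/nacos/v1/console/health/readiness"
```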

---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: test-pod
image: 10.250.0.200:8033/cmii/busybox:latest
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
        claimName: test-claim # must match the PVC name above
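
Applying this pair verifies dynamic provisioning end to end: the PVC should go `Bound` and the pod should exit `Completed` after touching the marker file. A sketch (`nfs-test.yaml` is a hypothetical file name for the two manifests above):

```bash
kubectl apply -f nfs-test.yaml
kubectl get pvc test-claim   # expect STATUS: Bound
kubectl get pod test-pod     # expect STATUS: Completed
# NFS-CREATE-SUCCESS should now be visible in the claim's directory on the NFS export
```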

---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # set to the namespace where the provisioner is deployed; likewise below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [ "" ]
resources: [ "persistentvolumes" ]
verbs: [ "get", "list", "watch", "create", "delete" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumeclaims" ]
verbs: [ "get", "list", "watch", "update" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "update", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # keep in the same namespace as the RBAC objects above
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
    spec:
      imagePullSecrets:
        - name: harborsecret
      serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 10.250.0.200:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
              value: # NFS server address, to be filled in
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
            server: # NFS server address, to be filled in
path: /var/lib/docker/nfs_data
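
Note that `NFS_SERVER` and the volume's `server:` are left blank above and must be filled in before applying. Once the Deployment is running, a quick sanity check (sketch):

```bash
# The StorageClass should be registered with the expected provisioner name
kubectl get storageclass nfs-prod-distribute
# The provisioner should be up and logging claim events
kubectl -n kube-system logs deploy/nfs-client-provisioner --tail=20
```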

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.6.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
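
Once the `nfs-prod-distribute` StorageClass above is working, all four claims should bind automatically:

```bash
kubectl -n bjtg get pvc   # expect every claim in STATUS: Bound
```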

---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
subjects:
  - kind: ServiceAccount
    name: helm-rabbitmq
    namespace: bjtg
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
      targetPort: dashboard
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: bjtg
publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 36675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: bjtg
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: bjtg
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: bjtg
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: { }
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: 10.250.0.200:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: { }
requests: { }
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: 10.250.0.200:8033/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: { }
requests: { }
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq
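
The node can be health-checked with the same diagnostics commands the probes run, and the management UI is exposed on NodePort 36675 (a sketch):

```bash
# Same checks the liveness/readiness probes use
kubectl -n bjtg exec helm-rabbitmq-0 -- rabbitmq-diagnostics -q ping
kubectl -n bjtg exec helm-rabbitmq-0 -- rabbitmq-diagnostics -q check_running
# Management UI: http://<node-ip>:36675 , login admin / nYcRN91r._hj (from rabbitmq.conf)
```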

---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: bjtg
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: bjtg
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: bjtg
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: bjtg
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: { }
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 10.250.0.200:8033/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "100m"
memory: 1Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: tmp
emptyDir: { }
- name: redis-data
emptyDir: { }
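        # NOTE: redis-data is an emptyDir, so Redis data is lost whenever the pod is
        # rescheduled; back it with a PersistentVolumeClaim for durable storage.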
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: bjtg
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: bjtg
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: bjtg
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 10.250.0.200:8033/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.bjtg.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "100m"
memory: 1Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: redis-data
emptyDir: { }

View File

@@ -0,0 +1,496 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: bjtg
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 30935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
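        # $CANDIDATE is substituted from the CANDIDATE env var on the pod (set to the
        # node IP 10.250.0.200 below); WebRTC clients must be able to reach it directly.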
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://10.250.0.200:8888;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
nodePort: 30935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30557
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
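# Each nodePort above is pinned to the same number as its port/targetPort, so external
# encoders and players can address the node IP on the protocol's native port
# (RTMP 30935/TCP, WebRTC 30090/UDP+TCP, SRT 30556/UDP, API 1985 -> 30557).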
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: 10.250.0.200:8033/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 30935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 10.250.0.200
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: bjtg/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: bjtg/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
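        # Sidecar sharing the HLS emptyDir with SRS; per its OSS_*/MYSQL_* settings it
        # ships recorded segments to object storage and indexes them in MySQL.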
- name: oss-adaptor
image: 10.250.0.200:8033/cmii/cmii-srs-oss-adaptor:2023-SA
env:
- name: OSS_ENDPOINT
value: 'http://:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: bjtg/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: 10.250.0.200:8033/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: bjtg
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 5.6.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: developer
password: N@cos14Good
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 5.6.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 5.6.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://10.250.0.200:30935'
rtsp: 'rtsp://10.250.0.200:30554'
srt: 'srt://10.250.0.200:30556'
flv: 'http://10.250.0.200:30500'
hls: 'http://10.250.0.200:30500'
rtc: 'webrtc://10.250.0.200:30090'
replay: 'https://10.250.0.200:30333'
minio:
endpoint: http://:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls

View File

@@ -0,0 +1,130 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: cmii-uav-iot-dispatcher
namespace: hbyd
labels:
app.kubernetes.io/app-version: 5.7.0
app.kubernetes.io/managed-by: octopus-control
cmii.app: cmii-uav-iot-dispatcher
cmii.type: backend
octopus/control: backend-app-1.0.0
spec:
replicas: 1
selector:
matchLabels:
cmii.app: cmii-uav-iot-dispatcher
cmii.type: backend
template:
metadata:
labels:
cmii.app: cmii-uav-iot-dispatcher
cmii.type: backend
spec:
volumes:
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: nfs-backend-log-pvc
containers:
- name: cmii-uav-iot-dispatcher
image: '192.168.0.10:8033/cmii/cmii-uav-iot-dispatcher:6.1.0'
ports:
- name: pod-port
containerPort: 8080
protocol: TCP
env:
- name: ENV
value: develop
- name: VERSION
value: 6.0.0
- name: NACOS_SYSTEM_CONFIG_NAME
value: cmii-backend-system
- name: NACOS_SERVICE_CONFIG_NAME
value: cmii-uav-iot-dispatcher
- name: NACOS_SERVER_ADDRESS
value: 'helm-nacos:8848'
- name: NACOS_NAMESPACE_ID
value: public
- name: NACOS_USERNAME
value: nacos
- name: NACOS_PASSWORD
value: KingKong@95461234
- name: SVC_NAME
value: cmlc-uav-iot-dispatcher-svc
- name: K8S_NAMESPACE
value: hbyd
- name: APPLICATION_NAME
value: cmii-uav-iot-dispatcher
- name: CUST_JAVA_OPTS
value: '-Xms500m -Xmx2500m -Dlog4j2.formatMsgNoLookups=true'
- name: NACOS_REGISTRY
value: 'helm-nacos:8848'
- name: NACOS_DISCOVERY_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: NACOS_DISCOVERY_PORT
value: '8080'
- name: BIZ_CONFIG_GROUP
value: 6.0.0
- name: SYS_CONFIG_GROUP
value: 6.0.0
- name: IMAGE_VERSION
value: 6.0.0
resources:
limits:
cpu: '2'
memory: 3Gi
requests:
cpu: 200m
memory: 500Mi
volumeMounts:
- name: nfs-backend-log-volume
mountPath: /cmii/logs
              subPath: hbyd/cmii-uav-iot-dispatcher
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: cmii-uav-iot-dispatcher
namespace: hbyd
labels:
app.kubernetes.io/app-version: 5.7.0
app.kubernetes.io/managed-by: octopus-control
cmii.app: cmii-uav-iot-dispatcher
cmii.type: backend
octopus/control: backend-app-1.0.0
spec:
ports:
- name: backend-tcp
protocol: TCP
port: 8080
targetPort: 8080
selector:
cmii.app: cmii-uav-iot-dispatcher
cmii.type: backend
type: ClusterIP
sessionAffinity: None

View File

@@ -0,0 +1,148 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-watchdog-agent
namespace: uavcloud-devflight
labels:
cmii.type: backend
cmii.app: cmii-uav-watchdog-agent
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: 6.2.0
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: backend
cmii.app: cmii-uav-watchdog-agent
template:
metadata:
labels:
cmii.type: backend
cmii.app: cmii-uav-watchdog-agent
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- devflight
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-watchdog-agent
image: harbor.cdcyy.com.cn/cmii/cmii-uav-watchdog-agent-java:2025-03-26-17-09-09
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: uavcloud-devflight
- name: APPLICATION_NAME
value: cmii-uav-watchdog-agent
- name: CUST_JAVA_OPTS
value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
- name: NACOS_REGISTRY
value: "helm-nacos:8848"
- name: NACOS_DISCOVERY_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NACOS_DISCOVERY_PORT
value: "8080"
- name: BIZ_CONFIG_GROUP
value: 6.2.0
- name: SYS_CONFIG_GROUP
value: 6.2.0
- name: IMAGE_VERSION
value: 6.2.0
- name: NACOS_USERNAME
value: "developer"
- name: NACOS_PASSWORD
value: "Deve@9128201"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: NODE_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: LIMIT_CPU
valueFrom:
resourceFieldRef:
containerName: cmii-uav-watchdog-agent
resource: limits.cpu
- name: LIMIT_MEMORY
valueFrom:
resourceFieldRef:
containerName: cmii-uav-watchdog-agent
resource: limits.memory
- name: REQUEST_CPU
valueFrom:
resourceFieldRef:
containerName: cmii-uav-watchdog-agent
resource: requests.cpu
- name: REQUEST_MEMORY
valueFrom:
resourceFieldRef:
containerName: cmii-uav-watchdog-agent
resource: requests.memory
- name: WATCHDOG_HEARTBEAT_URL
value: http://cmii-uav-watchdog:8080/api/heartbeat
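            # The POD_*/NODE_* and LIMIT_*/REQUEST_* values above come from the Kubernetes
            # downward API (fieldRef/resourceFieldRef), letting the agent report where it
            # runs and with what resources.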
ports:
- name: pod-port
containerPort: 8080
protocol: TCP
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
volumeMounts:
- name: nfs-backend-log-volume
mountPath: /cmii/logs
readOnly: false
subPath: uavcloud-devflight/cmii-uav-watchdog-agent
volumes:
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: nfs-backend-log-pvc
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-watchdog-agent
namespace: uavcloud-devflight
labels:
cmii.type: backend
cmii.app: cmii-uav-watchdog-agent
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: 6.2.0
spec:
type: ClusterIP
selector:
cmii.type: backend
cmii.app: cmii-uav-watchdog-agent
ports:
- name: backend-tcp
port: 8080
protocol: TCP
targetPort: 8080
---

View File

@@ -0,0 +1,178 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-watchdog
namespace: uavcloud-devflight
labels:
cmii.type: backend
cmii.app: cmii-uav-watchdog
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: 6.2.0
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: backend
cmii.app: cmii-uav-watchdog
template:
metadata:
labels:
cmii.type: backend
cmii.app: cmii-uav-watchdog
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- devflight
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-watchdog
image: harbor.cdcyy.com.cn/cmii/cmii-uav-watchdog:2025-03-26-17-33-09
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: uavcloud-devflight
- name: APPLICATION_NAME
value: cmii-uav-watchdog
- name: CUST_JAVA_OPTS
value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
- name: NACOS_REGISTRY
value: "helm-nacos:8848"
- name: NACOS_DISCOVERY_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NACOS_DISCOVERY_PORT
value: "8080"
- name: BIZ_CONFIG_GROUP
value: 6.2.0
- name: SYS_CONFIG_GROUP
value: 6.2.0
- name: IMAGE_VERSION
value: 6.2.0
- name: NACOS_USERNAME
value: "developer"
- name: NACOS_PASSWORD
value: "Deve@9128201"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: NODE_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: LIMIT_CPU
valueFrom:
resourceFieldRef:
containerName: cmii-uav-watchdog
resource: limits.cpu
- name: LIMIT_MEMORY
valueFrom:
resourceFieldRef:
containerName: cmii-uav-watchdog
resource: limits.memory
- name: REQUEST_CPU
valueFrom:
resourceFieldRef:
containerName: cmii-uav-watchdog
resource: requests.cpu
- name: REQUEST_MEMORY
valueFrom:
resourceFieldRef:
containerName: cmii-uav-watchdog
resource: requests.memory
ports:
- name: pod-port
containerPort: 8080
protocol: TCP
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
volumeMounts:
- name: nfs-backend-log-volume
mountPath: /cmii/cmii-uav-watchdog/
readOnly: false
subPath: uavcloud-devflight/cmii-uav-watchdog
- name: cmii-uav-watchdog-conf
mountPath: /cmii/cmii-uav-watchdog/config.yaml
subPath: config.yaml
volumes:
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: nfs-backend-log-pvc
- name: cmii-uav-watchdog-conf
configMap:
name: cmii-uav-watchdog-configmap
items:
- key: config.yaml
path: config.yaml
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-watchdog
namespace: uavcloud-devflight
labels:
cmii.type: backend
cmii.app: cmii-uav-watchdog
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: 6.2.0
spec:
type: NodePort
selector:
cmii.type: backend
cmii.app: cmii-uav-watchdog
ports:
- name: backend-tcp
port: 8080
protocol: TCP
targetPort: 8080
nodePort: 34567
---
kind: ConfigMap
apiVersion: v1
metadata:
name: cmii-uav-watchdog-configmap
namespace: uavcloud-devflight
data:
config.yaml: |-
    server:
      port: "8080" # server port
    tier_one_auth:
      tier_one_secret: "NK537TIWSUOFIS7SYCUJ6A7FPOGFVM3UH67TJRX3IYQAHKZXK2X7SBAA6JOXZVSV3U6K5YZUX7Q6TWOPK6YCRU6MIML33ZJFBN55I2Q" # TOTP secret
      time_offset_allowed: 30 # allowed clock skew, in seconds
    watchdog_center:
      url: "https://watchdog-center.example.com" # tier-one authorization center address
    project:
      project_namespace: "uavcloud-devflight" # project namespace
    tier_two_auth:
      tier_two_secret: "your_tier_two_secret_here" # tier-two authorization secret
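    # NOTE: tier_two_secret is an unfilled placeholder; set a real value before deploying.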

View File

@@ -0,0 +1,138 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: pyfusion-configmap
namespace: uavcloud-devflight
data:
config.yaml: |-
mqtt:
broker: "helm-emqxs"
port: 1883
username: "cmlc"
password: "4YPk*DS%+5"
topics:
mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
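      # "+" is an MQTT single-level wildcard, matching exactly one topic segment (here the device ID).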
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: cmii-uavms-pyfusion
namespace: uavcloud-devflight
labels:
app.kubernetes.io/app-version: 6.2.0
app.kubernetes.io/managed-by: octopus-control
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
octopus/control: backend-app-1.0.0
spec:
replicas: 1
selector:
matchLabels:
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
template:
metadata:
labels:
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
spec:
volumes:
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: nfs-backend-log-pvc
- name: pyfusion-conf
configMap:
name: pyfusion-configmap
items:
- key: config.yaml
path: config.yaml
containers:
- name: cmii-uavms-pyfusion
image: 'harbor.cdcyy.com.cn/cmii/cmii-uavms-pyfusion:6.2.0'
ports:
- name: pod-port
containerPort: 8080
protocol: TCP
env:
- name: VERSION
value: 6.2.0
- name: NACOS_SYSTEM_CONFIG_NAME
value: cmii-backend-system
- name: NACOS_SERVICE_CONFIG_NAME
value: cmii-uavms-pyfusion
- name: NACOS_SERVER_ADDRESS
value: 'helm-nacos:8848'
- name: K8S_NAMESPACE
value: uavcloud-devflight
- name: APPLICATION_NAME
value: cmii-uavms-pyfusion
- name: NACOS_DISCOVERY_PORT
value: '8080'
- name: BIZ_CONFIG_GROUP
value: 6.2.0
- name: SYS_CONFIG_GROUP
value: 6.2.0
- name: IMAGE_VERSION
value: 6.2.0
resources:
limits:
cpu: '2'
memory: 3Gi
requests:
cpu: 200m
memory: 500Mi
volumeMounts:
- name: nfs-backend-log-volume
mountPath: /cmii/logs
subPath: uavcloud-devflight/cmii-uavms-pyfusion
- name: pyfusion-conf
mountPath: /app/config.yaml
subPath: config.yaml
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: cmii-uavms-pyfusion
namespace: uavcloud-devflight
labels:
app.kubernetes.io/app-version: 6.2.0
app.kubernetes.io/managed-by: octopus-control
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
octopus/control: backend-app-1.0.0
spec:
ports:
- name: backend-tcp
protocol: TCP
port: 8080
targetPort: 8080
selector:
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
type: ClusterIP
sessionAffinity: None

View File

@@ -0,0 +1,91 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-platform-renyike
namespace: uavcloud-devoperation
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-renyike
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: 5.7.0
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: cmii-uav-platform-renyike
template:
metadata:
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-renyike
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-platform-renyike
image: harbor.cdcyy.com.cn/cmii/cmii-uav-platform-renyike:6.0.0-20241202
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: uavcloud-devoperation
- name: APPLICATION_NAME
value: cmii-uav-platform-renyike
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-splice
items:
- key: ingress-config.js
path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-renyike
namespace: uavcloud-devoperation
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-renyike
octopus.control: frontend-app-wdd
app.kubernetes.io/version: 5.7.0
spec:
type: NodePort
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-renyike
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528
nodePort: 33333
---

View File

@@ -0,0 +1,271 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-platform-classification
namespace: uavcloud-devflight
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-classification
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: 5.7.0
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: cmii-uav-platform-classification
template:
metadata:
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-classification
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-platform-classification
image: harbor.cdcyy.com.cn/cmii/cmii-uav-platform-classification:5.6.0
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: uavcloud-devflight
- name: APPLICATION_NAME
value: cmii-uav-platform-classification
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-splice
items:
- key: ingress-config.js
path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-classification
namespace: uavcloud-devflight
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-classification
octopus.control: frontend-app-wdd
app.kubernetes.io/version: 5.7.0
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-classification
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-platform-scanner
namespace: uavcloud-devflight
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-scanner
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: 5.7.0
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: cmii-uav-platform-scanner
template:
metadata:
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-scanner
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-platform-scanner
image: harbor.cdcyy.com.cn/cmii/cmii-uav-platform-scanner:5.6.0
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: uavcloud-devflight
- name: APPLICATION_NAME
value: cmii-uav-platform-scanner
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-splice
items:
- key: ingress-config.js
path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-scanner
namespace: uavcloud-devflight
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-scanner
octopus.control: frontend-app-wdd
app.kubernetes.io/version: 5.7.0
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-scanner
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-platform-blockchain
namespace: uavcloud-devflight
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-blockchain
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: 5.7.0
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: cmii-uav-platform-blockchain
template:
metadata:
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-blockchain
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-platform-blockchain
image: harbor.cdcyy.com.cn/cmii/cmii-uav-platform-blockchain:5.6.0
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: uavcloud-devflight
- name: APPLICATION_NAME
value: cmii-uav-platform-blockchain
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-splice
items:
- key: ingress-config.js
path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-blockchain
namespace: uavcloud-devflight
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-blockchain
octopus.control: frontend-app-wdd
app.kubernetes.io/version: 5.7.0
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-blockchain
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528
---

View File

@@ -0,0 +1,561 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-blockchain
namespace: uavcloud-devflight
labels:
cmii.type: backend
cmii.app: cmii-uav-blockchain
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: 5.7.0
spec:
replicas: 0
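  # replicas: 0 keeps the Deployment defined but scaled to zero; scale it up to enable the service.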
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: backend
cmii.app: cmii-uav-blockchain
template:
metadata:
labels:
cmii.type: backend
cmii.app: cmii-uav-blockchain
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- devflight
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-blockchain
image: harbor.cdcyy.com.cn/cmii/cmii-uav-blockchain:3.2.2-snapshot
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: uavcloud-devflight
- name: APPLICATION_NAME
value: cmii-uav-blockchain
- name: CUST_JAVA_OPTS
value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
- name: NACOS_REGISTRY
value: "helm-nacos:8848"
- name: NACOS_DISCOVERY_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NACOS_DISCOVERY_PORT
value: "8080"
- name: BIZ_CONFIG_GROUP
value: 5.7.0
- name: SYS_CONFIG_GROUP
value: 5.7.0
- name: IMAGE_VERSION
value: 5.7.0
- name: NACOS_USERNAME
value: "developer"
- name: NACOS_PASSWORD
value: "Deve@9128201"
ports:
- name: pod-port
containerPort: 8080
protocol: TCP
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
livenessProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
startupProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 3
periodSeconds: 20
successThreshold: 1
failureThreshold: 5
volumeMounts:
- name: nfs-backend-log-volume
mountPath: /cmii/logs
readOnly: false
subPath: uavcloud-devflight/cmii-uav-blockchain
volumes:
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: nfs-backend-log-pvc
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-blockchain
namespace: uavcloud-devflight
labels:
cmii.type: backend
cmii.app: cmii-uav-blockchain
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: 5.7.0
spec:
type: ClusterIP
selector:
cmii.type: backend
cmii.app: cmii-uav-blockchain
ports:
- name: backend-tcp
port: 8080
protocol: TCP
targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-container-scanner
namespace: uavcloud-devflight
labels:
cmii.type: backend
cmii.app: cmii-uav-container-scanner
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: 5.7.0
spec:
replicas: 0
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: backend
cmii.app: cmii-uav-container-scanner
template:
metadata:
labels:
cmii.type: backend
cmii.app: cmii-uav-container-scanner
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- devflight
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-container-scanner
image: harbor.cdcyy.com.cn/cmii/cmii-uav-container-scanner:5.6.0
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: uavcloud-devflight
- name: APPLICATION_NAME
value: cmii-uav-container-scanner
- name: CUST_JAVA_OPTS
value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
- name: NACOS_REGISTRY
value: "helm-nacos:8848"
- name: NACOS_DISCOVERY_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NACOS_DISCOVERY_PORT
value: "8080"
- name: BIZ_CONFIG_GROUP
value: 5.7.0
- name: SYS_CONFIG_GROUP
value: 5.7.0
- name: IMAGE_VERSION
value: 5.7.0
- name: NACOS_USERNAME
value: "developer"
- name: NACOS_PASSWORD
value: "Deve@9128201"
ports:
- name: pod-port
containerPort: 8080
protocol: TCP
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
livenessProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
startupProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 3
periodSeconds: 20
successThreshold: 1
failureThreshold: 5
volumeMounts:
- name: nfs-backend-log-volume
mountPath: /cmii/logs
readOnly: false
subPath: uavcloud-devflight/cmii-uav-container-scanner
volumes:
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: nfs-backend-log-pvc
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-container-scanner
namespace: uavcloud-devflight
labels:
cmii.type: backend
cmii.app: cmii-uav-container-scanner
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: 5.7.0
spec:
type: ClusterIP
selector:
cmii.type: backend
cmii.app: cmii-uav-container-scanner
ports:
- name: backend-tcp
port: 8080
protocol: TCP
targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-container-scanner-go
namespace: uavcloud-devflight
labels:
cmii.type: backend
cmii.app: cmii-uav-container-scanner-go
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: 5.7.0
spec:
replicas: 0
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: backend
cmii.app: cmii-uav-container-scanner-go
template:
metadata:
labels:
cmii.type: backend
cmii.app: cmii-uav-container-scanner-go
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- devflight
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-container-scanner-go
image: harbor.cdcyy.com.cn/cmii/cmii-uav-container-scanner-go:5.6.0
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: uavcloud-devflight
- name: APPLICATION_NAME
value: cmii-uav-container-scanner-go
- name: CUST_JAVA_OPTS
value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
- name: NACOS_REGISTRY
value: "helm-nacos:8848"
- name: NACOS_DISCOVERY_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NACOS_DISCOVERY_PORT
value: "8080"
- name: BIZ_CONFIG_GROUP
value: 5.7.0
- name: SYS_CONFIG_GROUP
value: 5.7.0
- name: IMAGE_VERSION
value: 5.7.0
- name: NACOS_USERNAME
value: "developer"
- name: NACOS_PASSWORD
value: "Deve@9128201"
ports:
- name: pod-port
containerPort: 8080
protocol: TCP
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
livenessProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
startupProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 3
periodSeconds: 20
successThreshold: 1
failureThreshold: 5
volumeMounts:
- name: nfs-backend-log-volume
mountPath: /cmii/logs
readOnly: false
subPath: uavcloud-devflight/cmii-uav-container-scanner-go
volumes:
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: nfs-backend-log-pvc
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-container-scanner-go
namespace: uavcloud-devflight
labels:
cmii.type: backend
cmii.app: cmii-uav-container-scanner-go
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: 5.7.0
spec:
type: ClusterIP
selector:
cmii.type: backend
cmii.app: cmii-uav-container-scanner-go
ports:
- name: backend-tcp
port: 8080
protocol: TCP
targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-data-classification
namespace: uavcloud-devflight
labels:
cmii.type: backend
cmii.app: cmii-uav-data-classification
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: 5.7.0
spec:
replicas: 0
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: backend
cmii.app: cmii-uav-data-classification
template:
metadata:
labels:
cmii.type: backend
cmii.app: cmii-uav-data-classification
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- devflight
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-data-classification
image: harbor.cdcyy.com.cn/cmii/cmii-uav-data-classification:5.6.0
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: uavcloud-devflight
- name: APPLICATION_NAME
value: cmii-uav-data-classification
- name: CUST_JAVA_OPTS
value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
- name: NACOS_REGISTRY
value: "helm-nacos:8848"
- name: NACOS_DISCOVERY_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NACOS_DISCOVERY_PORT
value: "8080"
- name: BIZ_CONFIG_GROUP
value: 5.7.0
- name: SYS_CONFIG_GROUP
value: 5.7.0
- name: IMAGE_VERSION
value: 5.7.0
- name: NACOS_USERNAME
value: "developer"
- name: NACOS_PASSWORD
value: "Deve@9128201"
ports:
- name: pod-port
containerPort: 8080
protocol: TCP
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
livenessProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
startupProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 3
periodSeconds: 20
successThreshold: 1
failureThreshold: 5
volumeMounts:
- name: nfs-backend-log-volume
mountPath: /cmii/logs
readOnly: false
subPath: uavcloud-devflight/cmii-uav-data-classification
volumes:
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: nfs-backend-log-pvc
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-data-classification
namespace: uavcloud-devflight
labels:
cmii.type: backend
cmii.app: cmii-uav-data-classification
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: 5.7.0
spec:
type: ClusterIP
selector:
cmii.type: backend
cmii.app: cmii-uav-data-classification
ports:
- name: backend-tcp
port: 8080
protocol: TCP
targetPort: 8080
---

File diff suppressed because it is too large

View File

@@ -0,0 +1,448 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
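    // The tenant-prefix-* ConfigMaps below repeat this shape, varying only
    // ApplicationShortName and AppClientId per application.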
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "hyper",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: cqejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.115.174:8088",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}

View File

@@ -0,0 +1,309 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 39999
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [ "" ]
resources: [ "secrets" ]
resourceNames: [ "kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf" ]
verbs: [ "get", "update", "delete" ]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [ "" ]
resources: [ "configmaps" ]
resourceNames: [ "kubernetes-dashboard-settings" ]
verbs: [ "get", "update" ]
# Allow Dashboard to get metrics.
- apiGroups: [ "" ]
resources: [ "services" ]
resourceNames: [ "heapster", "dashboard-metrics-scraper" ]
verbs: [ "proxy" ]
- apiGroups: [ "" ]
resources: [ "services/proxy" ]
resourceNames: [ "heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper" ]
verbs: [ "get" ]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: [ "metrics.k8s.io" ]
resources: [ "pods", "nodes" ]
verbs: [ "get", "list", "watch" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: kubernetes-dashboard
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/dashboard:v2.0.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: { }
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: { }
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
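# To sign in to the Dashboard, fetch a token for admin-user. A sketch (the exact
# command depends on the cluster version):
#   kubectl -n kube-system describe secret \
#     $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
# On clusters >= 1.24, which no longer auto-create token Secrets, use
#   kubectl -n kube-system create token admin-user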

View File

@@ -0,0 +1,274 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: cqejpt
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: cqejpt
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.7.0
data:
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
  EMQX_CLUSTER__K8S__NAMESPACE: "cqejpt"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: cqejpt
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.7.0
data:
emqx_auth_mnesia.conf: |-
auth.mnesia.password_hash = sha256
    # clientid auth entries
    # auth.client.1.clientid = admin
    # auth.client.1.password = 4YPk*DS%+5
    ## username auth entries
auth.user.1.username = admin
auth.user.1.password = odD8#Ve7.B
auth.user.2.username = cmlc
auth.user.2.password = odD8#Ve7.B
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
{emqx_auth_mnesia,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: cqejpt
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.7.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.7.0
spec:
affinity: { }
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/emqx:4.4.9
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: { }
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf"
subPath: emqx_auth_mnesia.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_mnesia.conf
path: emqx_auth_mnesia.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: cqejpt
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: cqejpt
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: cqejpt
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: cqejpt
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.7.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: cqejpt
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.7.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
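# Quick connectivity check (a sketch; assumes mosquitto-clients is installed and
# <node-ip> is any cluster node). MQTT is exposed on NodePort 31883 and the EMQX
# dashboard on 38085; the credentials come from emqx_auth_mnesia.conf above:
#   mosquitto_pub -h <node-ip> -p 31883 -u admin -P 'odD8#Ve7.B' -t test/topic -m hello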

File diff suppressed because it is too large

View File

@@ -0,0 +1,624 @@
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: cqejpt
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.7.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/supervisionh5)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/dispatchh5)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/hljtt)$ $1/ redirect;
rewrite ^(/hyper)$ $1/ redirect;
rewrite ^(/jiangsuwenlv)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/multiterminal)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/pilot2cloud)$ $1/ redirect;
rewrite ^(/qingdao)$ $1/ redirect;
rewrite ^(/qinghaitourism)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/securityh5)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/threedsimulation)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/uas)$ $1/ redirect;
rewrite ^(/uasms)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
spec:
rules:
- host: fake-domain.cqejpt.io
http:
paths:
- path: /?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /armypeople/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /dispatchh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dispatchh5
servicePort: 9528
- path: /emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /hljtt/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hljtt
servicePort: 9528
- path: /hyper/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hyperspectral
servicePort: 9528
- path: /jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-multiterminal
servicePort: 9528
- path: /mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /pilot2cloud/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-pilot2-to-cloud
servicePort: 9528
- path: /qingdao/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qingdao
servicePort: 9528
- path: /qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-security
servicePort: 9528
- path: /securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-threedsimulation
servicePort: 9528
- path: /traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-traffic
servicePort: 9528
- path: /uas/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uas
servicePort: 9528
- path: /uasms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uasms
servicePort: 9528
- path: /visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
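# How the rewrite works: a request like /supervision/login matches
# /supervision/?(.*), the capture group is "login", and rewrite-target /$1
# forwards it to cmii-suav-platform-supervision:9528 as /login. The
# configuration-snippet only adds a trailing slash to bare prefixes
# (/supervision -> /supervision/) so relative asset paths resolve correctly.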
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: cqejpt
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.7.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: cmii-admin-data.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-data
servicePort: 8080
- host: cmii-admin-gateway.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- host: cmii-admin-user.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-user
servicePort: 8080
- host: cmii-app-release.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-app-release
servicePort: 8080
- host: cmii-open-gateway.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- host: cmii-suav-supervision.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-supervision
servicePort: 8080
- host: cmii-uas-gateway.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- host: cmii-uas-lifecycle.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-lifecycle
servicePort: 8080
- host: cmii-uav-airspace.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-airspace
servicePort: 8080
- host: cmii-uav-alarm.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-alarm
servicePort: 8080
- host: cmii-uav-autowaypoint.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-autowaypoint
servicePort: 8080
- host: cmii-uav-brain.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-brain
servicePort: 8080
- host: cmii-uav-bridge.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-bridge
servicePort: 8080
- host: cmii-uav-cloud-live.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cloud-live
servicePort: 8080
- host: cmii-uav-clusters.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-clusters
servicePort: 8080
- host: cmii-uav-cms.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cms
servicePort: 8080
- host: cmii-uav-data-post-process.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-data-post-process
servicePort: 8080
- host: cmii-uav-depotautoreturn.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-depotautoreturn
servicePort: 8080
- host: cmii-uav-developer.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-developer
servicePort: 8080
- host: cmii-uav-device.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-device
servicePort: 8080
- host: cmii-uav-emergency.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-emergency
servicePort: 8080
- host: cmii-uav-gateway.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- host: cmii-uav-gis-server.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gis-server
servicePort: 8080
- host: cmii-uav-grid-datasource.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-datasource
servicePort: 8080
- host: cmii-uav-grid-engine.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-engine
servicePort: 8080
- host: cmii-uav-grid-manage.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-manage
servicePort: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-industrial-portfolio
servicePort: 8080
- host: cmii-uav-integration.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-integration
servicePort: 8080
- host: cmii-uav-kpi-monitor.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-kpi-monitor
servicePort: 8080
- host: cmii-uav-logger.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-logger
servicePort: 8080
- host: cmii-uav-material-warehouse.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-material-warehouse
servicePort: 8080
- host: cmii-uav-mission.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mission
servicePort: 8080
- host: cmii-uav-mqtthandler.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mqtthandler
servicePort: 8080
- host: cmii-uav-multilink.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-multilink
servicePort: 8080
- host: cmii-uav-notice.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-notice
servicePort: 8080
- host: cmii-uav-oauth.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-oauth
servicePort: 8080
- host: cmii-uav-process.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-sense-adapter.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sense-adapter
servicePort: 8080
- host: cmii-uav-surveillance.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-surveillance
servicePort: 8080
- host: cmii-uav-sync.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sync
servicePort: 8080
- host: cmii-uav-threedsimulation.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-threedsimulation
servicePort: 8080
- host: cmii-uav-tower.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tower
servicePort: 8080
- host: cmii-uav-user.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-user
servicePort: 8080
- host: cmii-uav-waypoint.uavcloud-cqejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-waypoint
servicePort: 8080
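# These backends are routed purely by Host header. A spot check (a sketch;
# assumes the nginx ingress controller is reachable at <ingress-ip>:80):
#   curl -H 'Host: cmii-uav-gateway.uavcloud-cqejpt.io' http://<ingress-ip>/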
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: cqejpt
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.7.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
spec:
rules:
- host: fake-domain.cqejpt.io
http:
paths:
- path: /oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
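# Path-based API routing on the shared host: /oms/api/x goes to cmii-admin-gateway
# as /x, /open/api/x to cmii-open-gateway as /x, and any remaining /api/x to
# cmii-uav-gateway as /x (rewrite-target /$1 strips the prefix). The Upgrade and
# Connection headers in the snippet keep WebSocket upgrades working through nginx.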

View File

@@ -0,0 +1,78 @@
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: cqejpt
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.7.0
spec:
type: NodePort
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
nodePort: 37017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: cqejpt
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.7.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 5.7.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: { }
containers:
- name: helm-mongo
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/mongo:5.0
resources: { }
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
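# Connection smoke test through the NodePort (a sketch; assumes mongosh is
# installed locally). The root password contains '#', so it must be
# percent-encoded in a connection URI:
#   mongosh "mongodb://cmlc:REdPza8%23oVlt@<node-ip>:37017/admin"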

View File

@@ -0,0 +1,423 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: cqejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
annotations: { }
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: cqejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
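# The values above are base64-encoded, not encrypted; to recover them:
#   echo 'UXpmWFFoZDNiUQ==' | base64 -d   # mysql-root-password
#   echo 'S0F0cm5PckFKNw==' | base64 -d   # mysql-password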
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: cqejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: cqejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create
user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create
user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all
on *.* to zyly_qc@'%';
create
user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
grant all
on *.* to k8s_admin@'%';
create
user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all
on *.* to audit_dba@'%';
create
user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT
SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT
on *.* to db_backup@'%';
create
user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION
CLIENT on *.* to monitor@'%';
flush
privileges;
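# Scripts mounted into /docker-entrypoint-initdb.d run only when the data
# directory is initialized for the first time; re-running them requires wiping
# the hostPath volume backing /bitnami/mysql/data.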
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: cqejpt
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: cqejpt
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: cqejpt
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: cqejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: { }
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: cqejpt
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: cqejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: { }
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: cqejpt
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: cqejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: cqejpt
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: { }
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: { }
requests: { }
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/cqejpt/
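# External clients reach MySQL through the NodePort service (a sketch):
#   mysql -h <node-ip> -P 33306 -u root -p
# In-cluster clients should prefer cmii-mysql:13306 or helm-mysql:3306 instead.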

View File

@@ -0,0 +1,130 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: cqejpt
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.7.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: cqejpt
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.7.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38848
- port: 9848
name: server12
targetPort: 9848
- port: 9849
name: server23
targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: cqejpt
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 5.7.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 5.7.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: { }
containers:
- name: nacos-server
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
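# Once the pod is ready, the Nacos console is served at http://<node-ip>:38848/nacos
# (standalone mode, auth disabled via NACOS_AUTH_ENABLE=false). Ports 9848/9849 are
# the gRPC client ports; since no nodePort is pinned for them, the cluster assigns
# one automatically.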

View File

@@ -0,0 +1,38 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: test-pod
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/busybox:latest
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
        claimName: test-claim # must match the PVC name above
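# Verification (a sketch): apply this file, wait for test-pod to reach the
# Completed state, then look for NFS-CREATE-SUCCESS under the export on the NFS
# server; clean up with kubectl delete -f on the same file afterwards.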

View File

@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # set to the namespace where the provisioner is deployed; same for the objects below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [ "" ]
resources: [ "persistentvolumes" ]
verbs: [ "get", "list", "watch", "create", "delete" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumeclaims" ]
verbs: [ "get", "list", "watch", "update" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "update", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment below
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # must match the namespace used in the RBAC objects above
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 192.168.9.11
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 192.168.9.11
path: /var/lib/docker/nfs_data
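# From here on, any PVC requesting storageClassName nfs-prod-distribute is
# provisioned dynamically as a subdirectory of 192.168.9.11:/var/lib/docker/nfs_data;
# the PVCs in the next file (nfs-backend-log-pvc, helm-emqxs, helm-mongo,
# helm-rabbitmq) all rely on this provisioner.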

View File

@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: cqejpt
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.7.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: cqejpt
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.7.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: cqejpt
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.7.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: cqejpt
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 5.7.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi

View File

@@ -0,0 +1,328 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: cqejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: cqejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: cqejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: cqejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: cqejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: cqejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
      targetPort: dashboard
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: cqejpt
publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: cqejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 36675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: cqejpt
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: cqejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: cqejpt
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: { }
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: { }
requests: { }
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: { }
requests: { }
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq
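# Management UI: http://<node-ip>:36675, AMQP: <node-ip>:35672. Log in as admin
# with the password from the helm-rabbitmq Secret, e.g. (a sketch):
#   kubectl -n cqejpt get secret helm-rabbitmq \
#     -o jsonpath='{.data.rabbitmq-password}' | base64 -d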

View File

@@ -0,0 +1,585 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: cqejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: cqejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: cqejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: cqejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: cqejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: cqejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: cqejpt
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: cqejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: cqejpt
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
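# The master Service is ClusterIP-only, so a quick check runs in-cluster
# (a sketch; assumes the redis image below is pullable from the same registry):
#   REDIS_PW=$(kubectl -n cqejpt get secret helm-redis -o jsonpath='{.data.redis-password}' | base64 -d)
#   kubectl -n cqejpt run redis-client --rm -it --restart=Never \
#     --image=chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/redis:6.2.6-debian-10-r0 \
#     -- redis-cli -h helm-redis-master -a "$REDIS_PW" ping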
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: cqejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: cqejpt
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: cqejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: cqejpt
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: { }
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "100m"
memory: 1Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: tmp
emptyDir: { }
- name: redis-data
emptyDir: { }
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: cqejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: cqejpt
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: { }
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: cqejpt
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.cqejpt.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "100m"
memory: 1Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: { }
- name: redis-data
emptyDir: { }
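# A quick check of the master/replica wiring once both pods are Running.
# This is a hedged sketch: it assumes the pod name follows the StatefulSet
# pattern and relies on the REDIS_PASSWORD env var injected above.
#
#   kubectl exec -n cqejpt helm-redis-master-0 -- \
#     sh -c 'REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli info replication'
#
# Expect role:master and connected_slaves:1. Note that redis-data is an
# emptyDir on both StatefulSets, so the dataset does not survive a pod
# being rescheduled.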

View File

@@ -0,0 +1,496 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: cqejpt
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 31935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://36.133.115.174:8088;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: cqejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
nodePort: 31935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30080
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
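# The exporter Service above publishes the SRS HTTP API on NodePort 30080.
# A minimal smoke test from outside the cluster (a hedged example, assuming
# the node is reachable at the candidate address 36.133.115.174 configured
# for this stack):
#
#   curl http://36.133.115.174:30080/api/v1/versions
#
# A JSON version payload confirms the rtc pod and the NodePort path are up.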
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: cqejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: cqejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: cqejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 30935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 36.133.115.174
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: cqejpt/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: cqejpt/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/cmii-srs-oss-adaptor:2023-SA
env:
- name: OSS_ENDPOINT
value: 'http://192.168.9.11:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: cqejpt/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
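# Publish smoke test for the StatefulSet above, pushed through the RTMP
# NodePort that the operator config advertises (rtmp://36.133.115.174:31935).
# A hedged sketch: the app/stream names ("live"/"test") and the input file
# are illustrative, not taken from this deployment.
#
#   ffmpeg -re -i sample.mp4 -c copy -f flv rtmp://36.133.115.174:31935/live/test
#
# If the push succeeds, HLS segments should appear under /home/hls in the
# srs-rtc container, where the oss-adaptor sidecar picks them up.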
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: cqejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: cqejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: cqejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: cqejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 5.7.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: developer
password: N@cos14Good
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 5.7.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 5.7.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://36.133.115.174:31935'
rtsp: 'rtsp://36.133.115.174:30554'
srt: 'srt://36.133.115.174:30556'
flv: 'http://36.133.115.174:30500'
hls: 'http://36.133.115.174:30500'
rtc: 'webrtc://36.133.115.174:30090'
replay: 'https://36.133.115.174:30333'
minio:
endpoint: http://192.168.9.11:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls
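# Once the operator pod is Ready, the same health path used by the probes
# above is reachable through NodePort 30333 (a hedged check, assuming the
# node IP from the proto section is reachable from your workstation):
#
#   curl http://36.133.115.174:30333/cmii/health
#
# In-cluster callers such as the SRS on_publish hook use
# http://helm-live-op-svc-v2:8080 instead.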

View File

@@ -0,0 +1,114 @@
#!/bin/bash
local_host_ip=20.4.16.200
# all_host_ip_list=(20.4.13.81 20.4.13.140 20.4.13.92 20.4.13.80)
all_host_ip_list=(20.4.13.80)
pass=V2ryStr@ngPss
copy_ssh_key_to_master(){
scp /root/.ssh/id_rsa root@20.4.13.81:/root/.ssh/id_rsa
scp /root/.ssh/id_rsa.pub root@20.4.13.81:/root/.ssh/id_rsa.pub
}
copy_ssh_key_to_master
install_nfs_server_suffix(){
ssh -i /root/.ssh/id_rsa root@20.4.13.81 "systemctl start rpcbind && systemctl enable rpcbind && systemctl start nfs-server && systemctl enable nfs-server"
ssh -i /root/.ssh/id_rsa root@20.4.13.81 "mkdir -p /var/lib/docker/nfs_data && chmod 777 /var/lib/docker/nfs_data"
ssh -i /root/.ssh/id_rsa root@20.4.13.81 "echo \"/var/lib/docker/nfs_data *(rw,no_root_squash,no_all_squash,sync)\" >> /etc/exports"
ssh -i /root/.ssh/id_rsa root@20.4.13.81 "systemctl restart rpcbind && systemctl restart nfs-server"
ssh -i /root/.ssh/id_rsa root@20.4.13.81 "rpcinfo -p localhost"
}
# install_nfs_server_suffix
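# Hedged helper, not part of the original run list: verify the export is
# visible and writable from this host before pointing anything at it.
verify_nfs_export(){
    showmount -e 20.4.13.81
    mkdir -p /mnt/nfs_check
    mount -t nfs 20.4.13.81:/var/lib/docker/nfs_data /mnt/nfs_check
    touch /mnt/nfs_check/.rw_test && rm -f /mnt/nfs_check/.rw_test
    umount /mnt/nfs_check
}
# verify_nfs_export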
install_all_demand_softwares(){
local host
local dep_file_list=(tar-1.32-4.oe2003sp4.x86_64.rpm common_tool-openEuler-20.03-LTS-SP4.tar.gz nfs_utils-openEuler-20.03-LTS-SP4.tar.gz nginx-openEuler-20.03-LTS-SP4.tar.gz ntp-openEuler-20.03-LTS-SP4.tar.gz)
for host in ${all_host_ip_list[@]}
do
echo "current host is ${host}"
ssh -i /root/.ssh/id_rsa root@${host} "echo yes"
ssh -i /root/.ssh/id_rsa root@${host} "systemctl start rpcbind && systemctl enable rpcbind && systemctl start ntpd && systemctl enable ntpd"
# ssh -i /root/.ssh/id_rsa root@${host} "mkdir -p /root/wdd/dep/"
# for dep in ${dep_file_list[@]}
# do
# echo "dep file is ${dep}"
#
# ssh -i /root/.ssh/id_rsa root@${host} "wget http://20.4.16.200:9000/octopus/euler/${dep} -O /root/wdd/dep/${dep}"
# ssh -i /root/.ssh/id_rsa root@${host} "rpm -ivh /root/wdd/dep/tar*.rpm"
# ssh -i /root/.ssh/id_rsa root@${host} "cd /root/wdd/dep/ && tar -zvxf common_tool-openEuler-20.03-LTS-SP4.tar.gz && cd ./common_tool && rpm -ivh --force ./*.rpm"
# ssh -i /root/.ssh/id_rsa root@${host} "cd /root/wdd/dep/ && tar -zvxf nfs_utils-openEuler-20.03-LTS-SP4.tar.gz && cd ./nfs_utils && rpm -ivh --force ./*.rpm"
# ssh -i /root/.ssh/id_rsa root@${host} "cd /root/wdd/dep/ && tar -zvxf ntp-openEuler-20.03-LTS-SP4.tar.gz && cd ./ntp && rpm -ivh --force ./*.rpm"
#
# done
echo ""
done
}
# install_all_demand_softwares
test_base_command_exists() {
local base_command_list=(ifconfig mtr vgdisplay nslookup vim htop tar unzip iftop curl wget netstat git zsh)
local command
for command in "${base_command_list[@]}"; do
if command -v "$command" &>/dev/null; then
echo "$command exists"
else
echo "ERROR $command does not exist!"
fi
echo ""
done
}
test_service_exists(){
local base_service_list=(ntpd chronyd nginx nfs-server rpcbind docker)
local service
for service in "${base_service_list[@]}"; do
        if ! systemctl list-unit-files | grep -q "^${service}\.service"; then
echo "ERROR $service.service does not exist!"
fi
echo ""
done
}
# test_base_command_exists
# test_service_exists
change_host_name(){
hostnamectl set-hostname master-node
}
install_ssh_key(){
echo "" >> /root/.ssh/authorized_keys
echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDL9S6CSAjTFe2fy4bAIfqu90ft6E+GBRvS59kno6LDeAbqUQNYf9hEnIg07Ee/x5DlnYE0S3Ykv3WCHuVyBH2zANnC0P87SqphDGmoqdqF3r6uDaCr4lBsUqEai9X2q6dyjZj6ym+r4zQhMApNDzbhcyfKQ54tKFylGIdx6siyktuU/VbOzWc6G8r+BfFsQpMCA1ihmCY1jGjsKPqFlZGLeTrlBb1Zk0OV+GtDhlf/t0cd0kRPJoydm2juTXrZO+tFmf9turfKZsBnRYKtQBLJG5mF1hsjIqo8DHr+PUL2wRrSxEhGTZiJL4rNJo/kHhKXXsomc5RM/AnfgAfxrLlH zeasl@DESKTOP-K2F9GG3 ">> /root/.ssh/authorized_keys
echo "" >> /root/.ssh/authorized_keys
}
install_octopus_server_offline(){
bash <(curl -sL http://20.4.16.200:9000/octopus/init-script-wdd.sh) --url http://20.4.16.200:9000/octopus --agent-install --offline
bash <(curl -sL http://20.4.16.200:9000/octopus/init-script-wdd.sh) --url http://20.4.16.200:9000/octopus --agent-remove --offline
cp /etc/ssh/sshd_config /etc/ssh/sshd_config_back
sed -i "s/StrictModes yes/StrictModes no/g" /etc/ssh/sshd_config
sed -i "s/AllowTcpForwarding no/AllowTcpForwarding yes/g" /etc/ssh/sshd_config
sed -i "s/AllowAgentForwarding no/AllowAgentForwarding yes/g" /etc/ssh/sshd_config
sed -i "s/PermitTunnel no/PermitTunnel yes/g" /etc/ssh/sshd_config
systemctl restart sshd
}
# Reference table (host IP, hostname, /etc/machine-id) from the provisioning notes.
machine_id_table(){
    cat <<'EOF'
20.4.13.81 Chongqing-amd64-01 354d6db5354d6db5354d6db5354d6db5
20.4.13.140 Chongqing-amd64-02 2a216db5354d6db5354d6db5354d6db5
20.4.13.92 Chongqing-amd64-03 3ca26db5354d6db5354d6db5354d6db5
20.4.13.80 Chongqing-amd64-04 4ea1d6db5354d6db5354d6db5354d6db
EOF
}
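# Hedged sketch: apply the hostnames from the table above over SSH. The
# machine-id column is reference data only and is not rewritten here.
apply_hostnames(){
    machine_id_table | while read -r ip name _; do
        ssh -i /root/.ssh/id_rsa root@${ip} "hostnamectl set-hostname ${name}"
    done
}
# apply_hostnames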

Some files were not shown because too many files have changed in this diff.